diff --git a/src/boutdata/cbdtoeqdsk.py b/src/boutdata/cbdtoeqdsk.py deleted file mode 100644 index aa6d7ac..0000000 --- a/src/boutdata/cbdtoeqdsk.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - - -class Bunch: - pass - - -def cbmtogeqdsk(g): - gg = Bunch() - gg.r = g["Rxy"] - gg.z = g["Zxy"] - gg.psi = g["psi"] - gg.pres = g["mu0p"] - gg.qpsi = g["qsafe"] - gg.fpol = g["f"] - gg.nx = g["nx"] - gg.ny = g["ny"] - i = np.argwhere(g["mu0p"] == 0) - - gg.simagx = gg.psi.min() - gg.sibdry = gg.psi[i[0]] - gg.xlim = 0 - gg.ylim = 0 - gg.nlim = 0 - - return gg diff --git a/src/boututils/analyse_equil_2.py b/src/boututils/analyse_equil_2.py deleted file mode 100644 index 4a79822..0000000 --- a/src/boututils/analyse_equil_2.py +++ /dev/null @@ -1,290 +0,0 @@ -"""Equilibrium analysis routine - -Takes a RZ psi grid, and finds x-points and o-points -""" - -import numpy -from crosslines import find_inter -from matplotlib.pyplot import annotate, contour, draw, gradient, plot -from past.utils import old_div -from scipy.interpolate import RectBivariateSpline - -from boututils.bunch import Bunch - -from . import local_min_max - - -def analyse_equil(F, R, Z): - """Takes an RZ psi grid, and finds x-points and o-points - - Parameters - ---------- - F : array_like - 2-D array of psi values - R : array_like - 1-D array of major radii, its length should be the same as the - first dimension of F - Z : array_like - 1-D array of heights, its length should be the same as the - second dimension of F - - Returns - ------- - object - An object of critical points containing: - - n_opoint, n_xpoint - Number of O- and X-points - primary_opt - Index of plasma centre O-point - inner_sep - X-point index of inner separatrix - opt_ri, opt_zi - R and Z indices for each O-point - opt_f - Psi value at each O-point - xpt_ri, xpt_zi - R and Z indices for each X-point - xpt_f - Psi value of each X-point - - """ - s = numpy.shape(F) - nx = s[0] - ny = s[1] - - # ;;;;;;;;;;;;;;; Find critical points ;;;;;;;;;;;;; - # - # Need to find starting locations for O-points (minima/maxima) - # and X-points (saddle points) - # - Rr = numpy.tile(R, nx).reshape(nx, ny).T - Zz = numpy.tile(Z, ny).reshape(nx, ny) - - contour1 = contour(Rr, Zz, gradient(F)[0], levels=[0.0], colors="r") - contour2 = contour(Rr, Zz, gradient(F)[1], levels=[0.0], colors="r") - - draw() - - # ## 1st method - line crossings --------------------------- - res = find_inter(contour1, contour2) - - # rex1=numpy.interp(res[0], R, numpy.arange(R.size)).astype(int) - # zex1=numpy.interp(res[1], Z, numpy.arange(Z.size)).astype(int) - - rex1 = res[0] - zex1 = res[1] - - w = numpy.where( - (rex1 > R[2]) & (rex1 < R[nx - 3]) & (zex1 > Z[2]) & (zex1 < Z[nx - 3]) - ) - nextrema = numpy.size(w) - rex1 = rex1[w].flatten() - zex1 = zex1[w].flatten() - - # ## 2nd method - local maxima_minima ----------------------- - res1 = local_min_max.detect_local_minima(F) - res2 = local_min_max.detect_local_maxima(F) - res = numpy.append(res1, res2, 1) - - rex2 = res[0, :].flatten() - zex2 = res[1, :].flatten() - - w = numpy.where((rex2 > 2) & (rex2 < nx - 3) & (zex2 > 2) & (zex2 < nx - 3)) - nextrema = numpy.size(w) - rex2 = rex2[w].flatten() - zex2 = zex2[w].flatten() - - n_opoint = nextrema - n_xpoint = numpy.size(rex1) - n_opoint - - # Needed for interp below - - Rx = numpy.arange(numpy.size(R)) - Zx = numpy.arange(numpy.size(Z)) - - print("Number of O-points: " + numpy.str(n_opoint)) - print("Number of X-points: " + numpy.str(n_xpoint)) - - # Deduce the O & X points - - x = R[rex2] - y = 
Z[zex2] - - dr = old_div((R[numpy.size(R) - 1] - R[0]), numpy.size(R)) - dz = old_div((Z[numpy.size(Z) - 1] - Z[0]), numpy.size(Z)) - - repeated = set() - for i in range(numpy.size(rex1)): - for j in range(numpy.size(x)): - if ( - numpy.abs(rex1[i] - x[j]) < 2 * dr - and numpy.abs(zex1[i] - y[j]) < 2 * dz - ): - repeated.add(i) - - # o-points - - o_ri = numpy.take(rex1, numpy.array(list(repeated))) - opt_ri = numpy.interp(o_ri, R, Rx) - o_zi = numpy.take(zex1, numpy.array(list(repeated))) - opt_zi = numpy.interp(o_zi, Z, Zx) - opt_f = numpy.zeros(numpy.size(opt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(opt_ri)): - opt_f[i] = func(opt_ri[i], opt_zi[i]) - - n_opoint = numpy.size(opt_ri) - - # x-points - - x_ri = numpy.delete(rex1, numpy.array(list(repeated))) - xpt_ri = numpy.interp(x_ri, R, Rx) - x_zi = numpy.delete(zex1, numpy.array(list(repeated))) - xpt_zi = numpy.interp(x_zi, Z, Zx) - xpt_f = numpy.zeros(numpy.size(xpt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(xpt_ri)): - xpt_f[i] = func(xpt_ri[i], xpt_zi[i]) - - n_xpoint = numpy.size(xpt_ri) - - # plot o-points - - plot(o_ri, o_zi, "o", markersize=10) - - labels = ["{0}".format(i) for i in range(o_ri.size)] - for label, xp, yp in zip(labels, o_ri, o_zi): - annotate( - label, - xy=(xp, yp), - xytext=(10, 10), - textcoords="offset points", - size="large", - color="b", - ) - - draw() - - # plot x-points - - plot(x_ri, x_zi, "x", markersize=10) - - labels = ["{0}".format(i) for i in range(x_ri.size)] - for label, xp, yp in zip(labels, x_ri, x_zi): - annotate( - label, - xy=(xp, yp), - xytext=(10, 10), - textcoords="offset points", - size="large", - color="r", - ) - - draw() - - print("Number of O-points: " + str(n_opoint)) - - if n_opoint == 0: - raise RuntimeError("No O-points! 
Giving up on this equilibrium") - - # ;;;;;;;;;;;;;; Find plasma centre ;;;;;;;;;;;;;;;;;;; - # Find the O-point closest to the middle of the grid - - mind = (opt_ri[0] - (old_div(numpy.float(nx), 2.0))) ** 2 + ( - opt_zi[0] - (old_div(numpy.float(ny), 2.0)) - ) ** 2 - ind = 0 - for i in range(1, n_opoint): - d = (opt_ri[i] - (old_div(numpy.float(nx), 2.0))) ** 2 + ( - opt_zi[i] - (old_div(numpy.float(ny), 2.0)) - ) ** 2 - if d < mind: - ind = i - mind = d - - primary_opt = ind - print( - "Primary O-point is at " - + numpy.str(numpy.interp(opt_ri[ind], numpy.arange(numpy.size(R)), R)) - + ", " - + numpy.str(numpy.interp(opt_zi[ind], numpy.arange(numpy.size(Z)), Z)) - ) - print("") - - if n_xpoint > 0: - # Find the primary separatrix - - # First remove non-monotonic separatrices - nkeep = 0 - for i in range(n_xpoint): - # Draw a line between the O-point and X-point - - n = 100 # Number of points - farr = numpy.zeros(n) - dr = old_div((xpt_ri[i] - opt_ri[ind]), numpy.float(n)) - dz = old_div((xpt_zi[i] - opt_zi[ind]), numpy.float(n)) - for j in range(n): - # interpolate f at this location - func = RectBivariateSpline(Rx, Zx, F) - - farr[j] = func( - opt_ri[ind] + dr * numpy.float(j), opt_zi[ind] + dz * numpy.float(j) - ) - - # farr should be monotonic, and shouldn't cross any other separatrices - - maxind = numpy.argmax(farr) - minind = numpy.argmin(farr) - if maxind < minind: - maxind, minind = minind, maxind - - # Allow a little leeway to account for errors - # NOTE: This needs a bit of refining - if (maxind > (n - 3)) and (minind < 3): - # Monotonic, so add this to a list of x-points to keep - if nkeep == 0: - keep = [i] - else: - keep = numpy.append(keep, i) - - nkeep = nkeep + 1 - - if nkeep > 0: - print("Keeping x-points ", keep) - xpt_ri = xpt_ri[keep] - xpt_zi = xpt_zi[keep] - xpt_f = xpt_f[keep] - else: - "No x-points kept" - - n_xpoint = nkeep - - # Now find x-point closest to primary O-point - s = numpy.argsort(numpy.abs(opt_f[ind] - xpt_f)) - xpt_ri = xpt_ri[s] - xpt_zi = xpt_zi[s] - xpt_f = xpt_f[s] - inner_sep = 0 - - else: - # No x-points. Pick mid-point in f - - xpt_f = 0.5 * (numpy.max(F) + numpy.min(F)) - - print("WARNING: No X-points. 
Setting separatrix to F = " + str(xpt_f)) - - xpt_ri = 0 - xpt_zi = 0 - inner_sep = 0 - - # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - # Put results into a structure - - result = Bunch( - n_opoint=n_opoint, - n_xpoint=n_xpoint, # Number of O- and X-points - primary_opt=primary_opt, # Which O-point is the plasma centre - inner_sep=inner_sep, # Innermost X-point separatrix - opt_ri=opt_ri, - opt_zi=opt_zi, - opt_f=opt_f, # O-point location (indices) and psi values - xpt_ri=xpt_ri, - xpt_zi=xpt_zi, - xpt_f=xpt_f, - ) # X-point locations and psi values - - return result diff --git a/src/boututils/closest_line.py b/src/boututils/closest_line.py deleted file mode 100644 index d8335c0..0000000 --- a/src/boututils/closest_line.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy - - -# Find the closest contour line to a given point -def closest_line(n, x, y, ri, zi, mind=None): - mind = numpy.min((x[0] - ri) ** 2 + (y[0] - zi) ** 2) - ind = 0 - - for i in range(1, n): - d = numpy.min((x[i] - ri) ** 2 + (y[i] - zi) ** 2) - if d < mind: - mind = d - ind = i - return ind diff --git a/src/boututils/crosslines.py b/src/boututils/crosslines.py deleted file mode 100644 index ae32251..0000000 --- a/src/boututils/crosslines.py +++ /dev/null @@ -1,234 +0,0 @@ -import itertools - -import numpy as np -from numpy.lib.stride_tricks import as_strided -from past.utils import old_div - - -def unique(a, atol=1e-08): - """Find unique rows in 2d array - - Parameters - ---------- - a : 2d ndarray, float - array to find unique rows in - atol : float, optional - tolerance to check uniqueness - - Returns - ------- - out : 2d ndarray, float - array of unique values - - Notes - ----- - Adapted to include tolerance from code at - https://stackoverflow.com/questions/8560440/removing-duplicate-columns-and-rows-from-a-numpy-2d-array#answer-8564438 - """ - - if np.issubdtype(a.dtype, float): - order = np.lexsort(a.T) - a = a[order] - diff = np.diff(a, axis=0) - np.abs(diff, out=diff) - ui = np.ones(len(a), "bool") - ui[1:] = (diff > atol).any(axis=1) - return a[ui] - else: - order = np.lexsort(a.T) - a = a[order] - diff = np.diff(a, axis=0) - ui = np.ones(len(a), "bool") - ui[1:] = (diff != 0).any(axis=1) - return a[ui] - - -def linelineintersect(a, b, atol=1e-08): - """Find all intersection points of two lines defined by series of x,y pairs - - Intersection points are unordered - Colinear lines that overlap intersect at any end points that fall within the overlap - - Parameters - ---------- - a, b : ndarray - 2 column ndarray of x,y values defining a two dimensional line. 1st - column is x values, 2nd column is x values. 
- - Notes - ----- - An example of 3 segment line is: a = numpy.array([[0,0],[5.0,3.0],[4.0,1]]) - Function faster when there are no overlapping line segments - - - add some lines for preventing zero-division - """ - - def x_y_from_line(line): - """1st and 2nd column of array""" - return (line[:, 0], line[:, 1]) - - def meshgrid_as_strided(x, y, mask=None): - """numpy.meshgrid without copying data (using as_strided)""" - if mask is None: - return ( - as_strided(x, strides=(0, x.strides[0]), shape=(y.size, x.size)), - as_strided(y, strides=(y.strides[0], 0), shape=(y.size, x.size)), - ) - else: - return ( - np.ma.array( - as_strided(x, strides=(0, x.strides[0]), shape=(y.size, x.size)), - mask=mask, - ), - np.ma.array( - as_strided(y, strides=(y.strides[0], 0), shape=(y.size, x.size)), - mask=mask, - ), - ) - - # In the following the indices i, j represents the pairing of the ith segment of b - # and the jth segment of a - # e.g. if ignore[i,j]==True then the ith segment of b and the jth segment of a - # cannot intersect - ignore = np.zeros([b.shape[0] - 1, a.shape[0] - 1], dtype=bool) - - x11, x21 = meshgrid_as_strided(a[:-1, 0], b[:-1, 0], mask=ignore) - x12, x22 = meshgrid_as_strided(a[1:, 0], b[1:, 0], mask=ignore) - y11, y21 = meshgrid_as_strided(a[:-1, 1], b[:-1, 1], mask=ignore) - y12, y22 = meshgrid_as_strided(a[1:, 1], b[1:, 1], mask=ignore) - - # ignore segements with non-overlappiong bounding boxes - ignore[np.ma.maximum(x11, x12) < np.ma.minimum(x21, x22)] = True - ignore[np.ma.minimum(x11, x12) > np.ma.maximum(x21, x22)] = True - ignore[np.ma.maximum(y11, y12) < np.ma.minimum(y21, y22)] = True - ignore[np.ma.minimum(y11, y12) > np.ma.maximum(y21, y22)] = True - - # find intersection of segments, ignoring impossible line segment pairs when new - # info becomes available - denom_ = np.empty(ignore.shape, dtype=float) - denom = np.ma.array(denom_, mask=ignore) - denom_[:, :] = ((y22 - y21) * (x12 - x11)) - ((x22 - x21) * (y12 - y11)) - # denom_tmp = ((y22 - y21) * (x12 - x11)) - ((x22 - x21) * (y12 - y11)) # H.SETO - denom_[:, :] = np.where(denom_ == 0.0, 1.0e-100, denom_) - - ua_ = np.empty(ignore.shape, dtype=float) - ua = np.ma.array(ua_, mask=ignore) - ua_[:, :] = ((x22 - x21) * (y11 - y21)) - ((y22 - y21) * (x11 - x21)) - ua_[:, :] /= denom - - ignore[ua < 0] = True - ignore[ua > 1] = True - - ub_ = np.empty(ignore.shape, dtype=float) - ub = np.ma.array(ub_, mask=ignore) - ub_[:, :] = ((x12 - x11) * (y11 - y21)) - ((y12 - y11) * (x11 - x21)) - ub_[:, :] /= denom - - ignore[ub < 0] = True - ignore[ub > 1] = True - - nans_ = np.zeros(ignore.shape, dtype=bool) - nans = np.ma.array(nans_, mask=ignore) - nans_[:, :] = np.isnan(ua) - - if not np.ma.any(nans): - # remove duplicate cases where intersection happens on an endpoint - # ignore[np.ma.where((ua[:, :-1] == 1) & (ua[:, 1:] == 0))] = True - # ignore[np.ma.where((ub[:-1, :] == 1) & (ub[1:, :] == 0))] = True - ignore[ - np.ma.where( - (ua[:, :-1] < 1.0 + atol) - & (ua[:, :-1] > 1.0 - atol) - & (ua[:, 1:] < atol) - & (ua[:, 1:] > -atol) - ) - ] = True - ignore[ - np.ma.where( - (ub[:-1, :] < 1 + atol) - & (ub[:-1, :] > 1 - atol) - & (ub[1:, :] < atol) - & (ub[1:, :] > -atol) - ) - ] = True - - xi = x11 + ua * (x12 - x11) - yi = y11 + ua * (y12 - y11) - return np.ma.compressed(xi), np.ma.compressed(yi) - else: - n_nans = np.ma.sum(nans) - n_standard = np.ma.count(x11) - n_nans - # I initially tried using a set to get unique points but had problems with - # floating point equality - - # create n by 2 array to hold all possible 
intersection points, - # check later for uniqueness - points = np.empty( - [n_standard + 2 * n_nans, 2], dtype=float - ) # each colinear segment pair has two intersections, - # hence the extra n_colinear points - - # add standard intersection points - xi = x11 + ua * (x12 - x11) - yi = y11 + ua * (y12 - y11) - points[:n_standard, 0] = np.ma.compressed(xi[~nans]) - points[:n_standard, 1] = np.ma.compressed(yi[~nans]) - ignore[~nans] = True - - # now add the appropriate intersection points for the colinear overlapping - # segments colinear overlapping segments intersect on the two internal points - # of the four points describing a straight line ua and ub have already - # serverd their purpose. Reuse them here to mean: - # ua is relative position of 1st b segment point along segment a - # ub is relative position of 2nd b segment point along segment a - use_x = x12 != x11 # avoid vertical lines diviiding by zero - use_y = ~use_x - - ua[use_x] = old_div((x21[use_x] - x11[use_x]), (x12[use_x] - x11[use_x])) - ua[use_y] = old_div((y21[use_y] - y11[use_y]), (y12[use_y] - y11[use_y])) - - ub[use_x] = old_div((x22[use_x] - x11[use_x]), (x12[use_x] - x11[use_x])) - ub[use_y] = old_div((y22[use_y] - y11[use_y]), (y12[use_y] - y11[use_y])) - - np.ma.clip(ua, a_min=0, a_max=1, out=ua) - np.ma.clip(ub, a_min=0, a_max=1, out=ub) - - xi = x11 + ua * (x12 - x11) - yi = y11 + ua * (y12 - y11) - points[n_standard : n_standard + n_nans, 0] = np.ma.compressed(xi) - points[n_standard : n_standard + n_nans, 1] = np.ma.compressed(yi) - - xi = x11 + ub * (x12 - x11) - yi = y11 + ub * (y12 - y11) - points[n_standard + n_nans :, 0] = np.ma.compressed(xi) - points[n_standard + n_nans :, 1] = np.ma.compressed(yi) - - points_unique = unique(points) - return points_unique[:, 0], points_unique[:, 1] - - -def find_inter(contour1, contour2): - xi = np.array([]) - yi = np.array([]) - - i = 0 - ncombos = sum([len(x.get_paths()) for x in contour1.collections]) * sum( - [len(x.get_paths()) for x in contour2.collections] - ) - for linecol1, linecol2 in itertools.product( - contour1.collections, contour2.collections - ): - for path1, path2 in itertools.product( - linecol1.get_paths(), linecol2.get_paths() - ): - i += 1 - print("line combo %5d of %5d" % (i, ncombos)) - xinter, yinter = linelineintersect(path1.vertices, path2.vertices) - - xi = np.append(xi, xinter) - yi = np.append(yi, yinter) - - # plt.scatter(xi, yi, s=20) - # plt.show() - return xi, yi diff --git a/src/boututils/efit_analyzer.py b/src/boututils/efit_analyzer.py deleted file mode 100644 index 8822524..0000000 --- a/src/boututils/efit_analyzer.py +++ /dev/null @@ -1,421 +0,0 @@ -# -*- coding: utf-8 -*- - - -import numpy as np -from past.utils import old_div -from pylab import ( - clabel, - cm, - contour, - draw, - figure, - legend, - plot, - setp, - show, - streamplot, - subplot2grid, - text, - tick_params, -) -from scipy import interpolate - -from .analyse_equil_2 import analyse_equil -from .ask import query_yes_no - -# from .radial_grid import radial_grid -from .read_geqdsk import read_geqdsk - -# from boututils.bunch import Bunch - - -def View2D(g, option=0): - # plot and check the field - fig = figure(num=2, figsize=(16, 6)) - # fig.suptitle('Efit Analysis', fontsize=20) - - ax = subplot2grid((3, 3), (0, 0), colspan=1, rowspan=3) - nlev = 100 - minf = np.min(g.psi) - maxf = np.max(g.psi) - levels = np.arange(np.float(nlev)) * (maxf - minf) / np.float(nlev - 1) + minf - ax.contour(g.r, g.z, g.psi, levels=levels) - # ax.set_xlim([0,6]) - ax.set_xlabel("R") - 
ax.set_ylabel("Z") - ax.yaxis.label.set_rotation("horizontal") - - ax.set_aspect("equal") - - # fig.suptitle('Efit Analysis', fontsize=20) - # title('Efit Analysis', fontsize=20) - text( - 0.5, - 1.08, - "Efit Analysis", - horizontalalignment="center", - fontsize=20, - transform=ax.transAxes, - ) - - draw() - if option == 0: - show(block=False) - - plot(g.xlim, g.ylim, "g-") - - draw() - - csb = contour(g.r, g.z, g.psi, levels=[g.sibdry]) - - clabel(csb, [g.sibdry], inline=1, fmt="%9.6f", fontsize=14) # label the level - - csb.collections[0].set_label("boundary") - - # pl1=plot(g.rbdry,g.zbdry,'b-',marker='x', label='$\psi=$'+ np.str(g.sibdry)) - # legend(bbox_to_anchor=(0., 1.05, 1., .105), loc='upper left') - - draw() - - # fig.set_tight_layout(True) - - # show(block=False) - - # Function fpol and qpsi are given between simagx (psi on the axis) and sibdry ( - # psi on limiter or separatrix). So the toroidal field (fpol/R) and the q profile - # are within these boundaries - - npsigrid = old_div(np.arange(np.size(g.pres)).astype(float), (np.size(g.pres) - 1)) - - fpsi = np.zeros((2, np.size(g.fpol)), np.float64) - fpsi[0, :] = g.simagx + npsigrid * (g.sibdry - g.simagx) - fpsi[1, :] = g.fpol - - # boundary = np.array([g.xlim, g.ylim]) - # - # rz_grid = Bunch( - # nr=g.nx, - # nz=g.ny, # Number of grid points - # r=g.r[:, 0], - # z=g.z[0, :], # R and Z as 1D arrays - # simagx=g.simagx, - # sibdry=g.sibdry, # Range of psi - # psi=g.psi, # Poloidal flux in Weber/rad on grid points - # npsigrid=npsigrid, # Normalised psi grid for fpol, pres and qpsi - # fpol=g.fpol, # Poloidal current function on uniform flux grid - # pres=g.pres, # Plasma pressure in nt/m^2 on uniform flux grid - # qpsi=g.qpsi, # q values on uniform flux grid - # nlim=g.nlim, - # rlim=g.xlim, - # zlim=g.ylim, - # ) # Wall boundary - - critical = analyse_equil(g.psi, g.r[:, 0], g.z[0, :]) - - # n_opoint = critical.n_opoint - # n_xpoint = critical.n_xpoint - primary_opt = critical.primary_opt - # inner_sep = critical.inner_sep - opt_ri = critical.opt_ri - opt_zi = critical.opt_zi - # opt_f = critical.opt_f - # xpt_ri = critical.xpt_ri - # xpt_zi = critical.xpt_zi - # xpt_f = critical.xpt_f - # - # psi_inner = 0.6 - # psi_outer = (0.8,) - # nrad = 68 - # npol = 64 - # rad_peaking = [0.0] - # pol_peaking = [0.0] - # parweight = 0.0 - - # boundary = np.array([rz_grid.rlim, rz_grid.zlim]) - - # Psi normalisation factors - - # faxis = critical.opt_f[critical.primary_opt] - # - # fnorm = critical.xpt_f[critical.inner_sep] - critical.opt_f[critical.primary_opt] - - # From normalised psi, get range of f - # f_inner = faxis + np.min(psi_inner) * fnorm - # f_outer = faxis + np.max(psi_outer) * fnorm - - # fvals = radial_grid(nrad, f_inner, f_outer, 1, 1, [xpt_f[inner_sep]], rad_peaking) - - # # Create a starting surface - # sind = np.int(nrad / 2) - # start_f = 0. 
#fvals[sind] - - # Find where we have rational surfaces - # define an interpolation of psi(q) - - psiq = ( - np.arange(np.float(g.qpsi.size)) - * (g.sibdry - g.simagx) - / np.float(g.qpsi.size - 1) - + g.simagx - ) - fpsiq = interpolate.interp1d(g.qpsi, psiq) - - # Find how many rational surfaces we have within the boundary and locate x,y - # position of curves - - nmax = g.qpsi.max().astype(int) - nmin = g.qpsi.min().astype(int) - - nr = np.arange(nmin + 1, nmax + 1) - psi = fpsiq(nr) - - cs = contour(g.r, g.z, g.psi, levels=psi) - labels = ["$q=" + np.str(x) + "$\n" for x in range(nr[0], nr[-1] + 1)] - - for i in range(len(labels)): - cs.collections[i].set_label(labels[i]) - - style = ["--", ":", "--", ":", "-."] - - # gca().set_color_cycle(col) - - # proxy = [Rectangle((0,0),1,1, fc=col[:i+1]) - # for pc in cs.collections] - # - # l2=legend(proxy, textstr[:i) - # - # gca().add_artist(l1) - - x = [] - y = [] - for i in range(psi.size): - xx, yy = surface( - cs, i, psi[i], opt_ri[primary_opt], opt_zi[primary_opt], style, option - ) - x.append(xx) - y.append(yy) - - # props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) - # textstr = ['$q='+np.str(x)+'$\n' for x in range(psi.size)] - # - # ax.text(0.85, 1.15, ''.join(textstr), transform=ax.transAxes, fontsize=14, - # verticalalignment='top', bbox=props) - - legend(bbox_to_anchor=(-0.8, 1), loc="upper left", borderaxespad=0.0) - - draw() - - # compute B - field - - Bp = np.gradient(g.psi) - - dr = old_div((np.max(g.r[:, 0]) - np.min(g.r[:, 0])), np.size(g.r[:, 0])) - dz = old_div((np.max(g.z[0, :]) - np.min(g.z[0, :])), np.size(g.z[0, :])) - - dpsidr = Bp[0] - dpsidz = Bp[1] - - Br = -dpsidz / dz / g.r - Bz = dpsidr / dr / g.r - - Bprz = np.sqrt(Br * Br + Bz * Bz) - - # plot Bp field - if option == 0: - sm = query_yes_no("Overplot vector field") - if sm: - # lw = 50 * Bprz / Bprz.max() - streamplot( - g.r.T, g.z.T, Br.T, Bz.T, color=Bprz, linewidth=2, cmap=cm.bone - ) # density =[.5, 1], color='k')#, linewidth=lw) - draw() - - # plot toroidal field - - ax = subplot2grid((3, 3), (0, 1), colspan=2, rowspan=1) - ax.plot(psiq, fpsi[1, :]) - # ax.set_xlim([0,6]) - # ax.set_xlabel('$\psi$') - ax.set_ylabel("$fpol$") - ax.yaxis.label.set_size(20) - # ax.xaxis.label.set_size(20) - - ax.set_xticks([]) - - ax.yaxis.label.set_rotation("horizontal") - # ax.xaxis.labelpad = 10 - ax.yaxis.labelpad = 20 - - # props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) - # ax.text(0.85, 0.95, '$B_t$', transform=ax.transAxes, fontsize=14, - # verticalalignment='top', bbox=props) - - # - draw() - - # plot pressure - - ax = subplot2grid((3, 3), (1, 1), colspan=2, rowspan=1) - ax.plot(psiq, g.pres) - # ax.set_xlim([0,6]) - # ax.set_xlabel('$\psi$') - ax.set_ylabel("$P$") - ax.yaxis.label.set_size(20) - # ax.xaxis.label.set_size(20) - - ax.set_xticks([]) - - ax.yaxis.label.set_rotation("horizontal") - # ax.xaxis.labelpad = 10 - ax.yaxis.labelpad = 20 - - # props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) - # ax.text(0.85, 0.95, '$B_t$', transform=ax.transAxes, fontsize=14, - # verticalalignment='top', bbox=props) - - # - draw() - - # plot qpsi - - ax = subplot2grid((3, 3), (2, 1), colspan=2, rowspan=1) - - ax.plot(psiq, g.qpsi) - ax.set_xlabel(r"$\psi$") - ax.set_ylabel("$q$") - ax.yaxis.label.set_rotation("horizontal") - ax.yaxis.label.set_size(20) - ax.xaxis.label.set_size(20) - - ax.xaxis.labelpad = 10 - ax.yaxis.labelpad = 20 - - tick_params( - axis="x", # changes apply to the x-axis - which="both", # both major and minor ticks are 
affected - bottom="on", # ticks along the bottom edge are off - top="off", # ticks along the top edge are off - labelbottom="on", - ) - - draw() - - # Compute and draw Jpar - # - # MU = 4.e-7*np.pi - # - # jpar0 = - Bxy * fprime / MU - Rxy*Btxy * dpdpsi / Bxy - # - - # fig.set_tight_layout(True) - - fig.subplots_adjust(left=0.2, top=0.9, hspace=0.1, wspace=0.5) - - draw() - - if option == 0: - show(block=False) - - if option != 0: - return Br, Bz, x, y, psi - - -# # output to files -# np.savetxt('../data.in', np.reshape([g.nx, g.ny],(1,2)), fmt='%i, %i') -# f_handle = open('../data.in', 'a') -# np.savetxt(f_handle,np.reshape([g.rmagx, g.zmagx],(1,2)), fmt='%e, %e') -# np.savetxt(f_handle, np.reshape([np.max(g.r),np.max(g.z)],(1,2))) -# np.savetxt(f_handle, np.reshape([g.bcentr,g.rcentr],(1,2))) -# f_handle.close() -# -# -# f_handle = open('../input.field', 'w') -# np.savetxt(f_handle, np.reshape([dx, dy],(1,2))) -# np.savetxt(f_handle, (g.r[:,0],g.z[0,:])) -# np.savetxt(f_handle, Bx) # fortran compatibility -# np.savetxt(f_handle, By) -# f_handle.close() -# -# np.savetxt('../tbound',[np.size(g.xlim)], fmt='%i') -# f_handle = open('../tbound', 'a') -# np.savetxt(f_handle, (g.xlim,g.ylim)) -# f_handle.close() -# -# np.savetxt('../pbound',[np.size(g.rbdry)], fmt='%i') -# f_handle = open('../pbound', 'a') -# np.savetxt(f_handle, (g.rbdry,g.zbdry)) -# f_handle.close() - - -def surface(cs, i, f, opt_ri, opt_zi, style, iplot=0): - # contour_lines( F, np.arange(nx).astype(float), np.arange(ny).astype(float), - # levels=[start_f]) - # cs=contour( g.r, g.z, g.psi, levels=[f]) - # proxy = [Rectangle((0,0),1,1,fc = 'b') - # for pc in cs.collections] - # - # legend(proxy, ["q="+np.str(i)]) - - p = cs.collections[i].get_paths() - # - # You might get more than one contours for the same start_f. We need to keep the - # closed one - # vn = np.zeros(np.size(p)) - - # find the closed contour - - for k in range(np.size(p)): - v = p[k].vertices - vx = v[:, 0] - vy = v[:, 1] - if [vx[0], vy[0]] == [vx[-1], vy[-1]]: - xx = vx - yy = vy - - x = xx - y = yy - # v = p[0].vertices - # vn[0]=np.shape(v)[0] - # xx=v[:,0] - # yy=v[:,1] - - # if np.shape(vn)[0] > 1: - # for i in xrange(1,np.shape(vn)[0]): - # v = p[i].vertices - # vn[i]=np.shape(v)[0] - # xx = [xx,v[:,0]] - # yy = [yy,v[:,1]] - - # if np.shape(vn)[0] > 1 : - # # Find the surface closest to the o-point - # ind = closest_line(np.size(xx), xx, yy, opt_ri, opt_zi) - # x=xx[ind] - # y=yy[ind] - # else: - # ind = 0 - # x=xx - # y=yy - # - if iplot == 0: - # plot the start_f line - zc = cs.collections[i] - setp(zc, linewidth=4, linestyle=style[i]) - - clabel(cs, [f], inline=1, fmt="%9.6f", fontsize=14) # label the level - - # annotate('q= '+np.str(i+1),(x[0]+.1,y[0]+.1)) - - draw() - - show(block=False) - - return x, y - - -if __name__ == "__main__": - path = "../../tokamak_grids/pyGridGen/" - - g = read_geqdsk(path + "g118898.03400") - - View2D(g, option=0) - show() diff --git a/src/boututils/fft_deriv.py b/src/boututils/fft_deriv.py deleted file mode 100644 index 921e6b9..0000000 --- a/src/boututils/fft_deriv.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy -from past.utils import old_div - -# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -# FFT_DERIV: Calculates the derivative of a variable on a ; -# periodic domain. 
; -# ; -# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - -def fft_deriv(var): - # on_error, 2 - - n = numpy.size(var) - - F = old_div(numpy.fft.fft(var), n) # different definition between IDL - python - - imag = numpy.complex(0.0, 1.0) - imag = numpy.complex_(imag) - - F[0] = 0.0 - - if (n % 2) == 0: - # even number - for i in range(1, old_div(n, 2)): - a = imag * 2.0 * numpy.pi * numpy.float(i) / numpy.float(n) - F[i] = F[i] * a # positive frequencies - F[n - i] = -F[n - i] * a # negative frequencies - - F[old_div(n, 2)] = F[old_div(n, 2)] * (imag * numpy.pi) - else: - # odd number - for i in range(1, old_div((n - 1), 2) + 1): - a = imag * 2.0 * numpy.pi * numpy.float(i) / numpy.float(n) - F[i] = F[i] * a - F[n - i] = -F[n - i] * a - - result = numpy.fft.ifft(F) * n # different definition between IDL - python - - return result diff --git a/src/boututils/file_import.py b/src/boututils/file_import.py deleted file mode 100644 index 34d3554..0000000 --- a/src/boututils/file_import.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Import an entire BOUT++ DataFile into memory - -""" - -from boututils.datafile import DataFile - - -def file_import(name): - """Read all variables from file into a dictionary - - Parameters - ---------- - name : str - Name of file to read - - Returns - ------- - dict - Dictionary containing all the variables in the file - """ - f = DataFile(name) # Open file - varlist = f.list() # Get list of all variables in file - data = {} # Create empty dictionary - for v in varlist: - data[v] = f.read(v) - f.close() - return data diff --git a/src/boututils/idl_tabulate.py b/src/boututils/idl_tabulate.py deleted file mode 100644 index a1b1c8b..0000000 --- a/src/boututils/idl_tabulate.py +++ /dev/null @@ -1,16 +0,0 @@ -import numpy as np -import scipy - - -def idl_tabulate(x, f, p=5): - def newton_cotes(x, f): - if x.shape[0] < 2: - return 0 - rn = (x.shape[0] - 1) * (x - x[0]) / (x[-1] - x[0]) - weights = scipy.integrate.newton_cotes(rn)[0] - return (x[-1] - x[0]) / (x.shape[0] - 1) * np.dot(weights, f) - - ret = 0 - for idx in range(0, x.shape[0], p - 1): - ret += newton_cotes(x[idx : idx + p], f[idx : idx + p]) - return ret diff --git a/src/boututils/plotpolslice.py b/src/boututils/plotpolslice.py deleted file mode 100644 index 9fcd87d..0000000 --- a/src/boututils/plotpolslice.py +++ /dev/null @@ -1,144 +0,0 @@ -import sys - -import numpy as np -from past.utils import old_div - -from boututils.file_import import file_import - -if sys.version_info[0] >= 3: - message = ( - "polplotslice uses the VTK library through mayavi, which" - + " is currently only available in python 2" - ) - raise ImportError(message) -else: - from mayavi import mlab - from tvtk.tools import visual - - -def zinterp(v, zind): - # v = REFORM(v) - nz = np.size(v) - z0 = np.round(zind) - - p = zind - float(z0) # between -0.5 and 0.5 - - if p < 0.0: - z0 = z0 - 1 - p = p + 1.0 - - z0 = ((z0 % (nz - 1)) + (nz - 1)) % (nz - 1) - - # for now 3-point interpolation - - zp = (z0 + 1) % (nz - 1) - zm = (z0 - 1 + (nz - 1)) % (nz - 1) - - result = ( - 0.5 * p * (p - 1.0) * v[zm] - + (1.0 - p * p) * v[z0] - + 0.5 * p * (p + 1.0) * v[zp] - ) - - return result - - -def plotpolslice(var3d, gridfile, period=1, zangle=0.0, rz=1, fig=0): - """data2d = plotpolslice(data3d, 'gridfile' , period=1, zangle=0.0, - rz:return (r,z) grid also=1, fig: to do the graph, set to 1 )""" - - g = file_import(gridfile) - - nx = var3d.shape[0] - ny = var3d.shape[1] - nz = var3d.shape[2] - - zShift = g.get("zShift") - rxy = 
g.get("Rxy") - zxy = g.get("Zxy") - - dz = 2.0 * np.pi / float(period * nz) - - ny2 = ny - nskip = np.zeros(ny - 1) - for i in range(ny - 1): - ip = (i + 1) % ny - nskip[i] = 0 - for x in range(nx): - ns = old_div(np.max(np.abs(zShift[x, ip] - zShift[x, i])), dz) - 1 - if ns > nskip[i]: - nskip[i] = ns - - nskip = np.int_(np.round(nskip)) - ny2 = np.int_(ny2 + np.sum(nskip)) - - print("Number of poloidal points in output:" + str(ny2)) - - var2d = np.zeros((nx, ny2)) - r = np.zeros((nx, ny2)) - z = np.zeros((nx, ny2)) - - ypos = 0 - for y in range(ny - 1): - # put in the original points - for x in range(nx): - zind = old_div((zangle - zShift[x, y]), dz) - var2d[x, ypos] = zinterp(var3d[x, y, :], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = - # var2d[x,ypos] + profile[x,y] - r[x, ypos] = rxy[x, y] - z[x, ypos] = zxy[x, y] - - ypos = ypos + 1 - - print((y, ypos)) - - # and the extra points - - for x in range(nx): - zi0 = old_div((zangle - zShift[x, y]), dz) - zip1 = old_div((zangle - zShift[x, y + 1]), dz) - - dzi = old_div((zip1 - zi0), (nskip[y] + 1)) - - for i in range(nskip[y]): - zi = zi0 + float(i + 1) * dzi # zindex - w = old_div(float(i + 1), float(nskip[y] + 1)) # weighting - - var2d[x, ypos + i] = w * zinterp(var3d[x, y + 1, :], zi) + ( - 1.0 - w - ) * zinterp(var3d[x, y, :], zi) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos+i] = - # var2d[x,ypos+i] + w*profile[x,y+1] + (1.0-w)*profile[x,y] - r[x, ypos + i] = w * rxy[x, y + 1] + (1.0 - w) * rxy[x, y] - z[x, ypos + i] = w * zxy[x, y + 1] + (1.0 - w) * zxy[x, y] - - ypos = ypos + nskip[y] - - # FINAL POINT - - for x in range(nx): - zind = old_div((zangle - zShift[x, ny - 1]), dz) - var2d[x, ypos] = zinterp(var3d[x, ny - 1, :], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = var2d[x,ypos] + profile[x,ny-1] - r[x, ypos] = rxy[x, ny - 1] - z[x, ypos] = zxy[x, ny - 1] - - if fig == 1: - f = mlab.figure(size=(600, 600)) - # Tell visual to use this as the viewer. - visual.set_viewer(f) - - s = mlab.mesh( - r, z, var2d, colormap="PuOr" - ) # , wrap_scale='true')#, representation='wireframe') - s.enable_contours = True - s.contour.filled_contours = True - mlab.view(0, 0) - - else: - # return according to opt - if rz == 1: - return r, z, var2d - else: - return var2d diff --git a/src/boututils/radial_grid.py b/src/boututils/radial_grid.py deleted file mode 100644 index 6dbeb3d..0000000 --- a/src/boututils/radial_grid.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy -from past.utils import old_div - -# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -# -# radial grid -# -# n - number of grid points -# pin, pout - range of psi -# seps - locations of separatrices -# sep_factor - separatrix peaking -# in_dp=in_dp - Fix the dx on the lower side -# out_dp=out_dp - Fix the dx on the upper side - - -def radial_grid( - n, pin, pout, include_in, include_out, seps, sep_factor, in_dp=None, out_dp=None -): - if n == 1: - return [0.5 * (pin + pout)] - - x = numpy.arange(0.0, n) - m = numpy.float(n - 1) - if include_in is None: - x = x + 0.5 - m = m + 0.5 - - if include_out is None: - m = m + 0.5 - - x = old_div(x, m) - - if in_dp is None and out_dp is None: - # Neither inner or outer gradients set. 
Just return equal spacing - return pin + (pout - pin) * x - - norm = (x[1] - x[0]) * (pout - pin) - - if in_dp is not None and out_dp is not None: - # Fit to dist = a*i^3 + b*i^2 + c*i - c = old_div(in_dp, norm) - b = 3.0 * (1.0 - c) - old_div(out_dp, norm) + c - a = 1.0 - c - b - elif in_dp is not None: - # Only inner set - c = old_div(in_dp, norm) - a = 0.5 * (c - 1.0) - b = 1.0 - c - a - - # a = 0 - # c = in_dp/norm - # b = 1. - c - else: - # Only outer set. Used in PF region - # Fit to (1-b)*x^a + bx for fixed b - df = old_div(out_dp, norm) - b = 0.25 < df # Make sure a > 0 - a = old_div((df - b), (1.0 - b)) - vals = pin + (pout - pin) * ((1.0 - b) * x ^ a + b * x) - return vals - - vals = pin + (pout - pin) * (c * x + b * x ^ 2 + a * x ^ 3) - # STOP - return vals diff --git a/src/boututils/surface_average.py b/src/boututils/surface_average.py index 0e4c1c0..9f4d09d 100644 --- a/src/boututils/surface_average.py +++ b/src/boututils/surface_average.py @@ -3,12 +3,25 @@ """ import numpy as np +import scipy from past.utils import old_div from boututils.calculus import deriv from boututils.int_func import int_func -from .idl_tabulate import idl_tabulate + +def idl_tabulate(x, f, p=5): + def newton_cotes(x, f): + if x.shape[0] < 2: + return 0 + rn = (x.shape[0] - 1) * (x - x[0]) / (x[-1] - x[0]) + weights = scipy.integrate.newton_cotes(rn)[0] + return (x[-1] - x[0]) / (x.shape[0] - 1) * np.dot(weights, f) + + ret = 0 + for idx in range(0, x.shape[0], p - 1): + ret += newton_cotes(x[idx : idx + p], f[idx : idx + p]) + return ret def surface_average(var, grid, area=None): diff --git a/src/boututils/watch.py b/src/boututils/watch.py deleted file mode 100644 index e499aeb..0000000 --- a/src/boututils/watch.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Routines for watching files for changes - -""" - -import os -import time - - -def watch(files, timeout=None, poll=2): - """Watch a given file or collection of files until one changes. Uses - polling. - - Parameters - ---------- - files : str or list of str - Name of one or more files to watch - timeout : int, optional - Timeout in seconds (default is no timeout) - poll : int, optional - Polling interval in seconds (default: 2) - - Returns - ------- - str - The name of the first changed file, - or None if timed out before any changes - - Examples - -------- - - To watch one file, timing out after 60 seconds: - - >>> watch('file1', timeout=60) - - To watch 2 files, never timing out: - - >>> watch(['file1', 'file2']) - - Author: Ben Dudson - - """ - - # Get modification time of file(s) - try: - if hasattr(files, "__iter__"): - # Iterable - lastmod = [os.stat(f).st_mtime for f in files] - iterable = True - else: - # Not iterable -> just one file - lastmod = os.stat(files).st_mtime - iterable = False - except FileNotFoundError: - print("Can't test modified time. Wrong file name?") - raise - - start_time = time.time() - running = True - while running: - sleepfor = poll - if timeout: - # Check if timeout will be reached before next poll - if time.time() - start_time + sleepfor > timeout: - # Adjust time so that finish at timeout - sleepfor = timeout - (time.time() - start_time) - running = False # Stop after next test - - time.sleep(sleepfor) - - if iterable: - for last_t, f in zip(lastmod, files): - # Get the new modification time - t = os.stat(f).st_mtime - if t > last_t + 1.0: # +1 to reduce risk of false alarms - # File has been modified - return f - else: - t = os.stat(files).st_mtime - if t > lastmod + 1.0: - return files - return None
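
The only code this patch adds, rather than deletes, is the idl_tabulate helper inlined into src/boututils/surface_average.py, which removes that module's dependency on the deleted boututils/idl_tabulate.py. As a minimal sketch of how the inlined helper behaves, the standalone script below integrates sin(x) over [0, pi] and should recover a value close to the exact answer of 2. The sin() test case is illustrative and not part of the patch, and the sketch imports scipy.integrate explicitly as a precaution, since older SciPy releases do not expose the integrate submodule through a bare "import scipy"; the patch itself keeps the original bare import.

import numpy as np
import scipy.integrate  # explicit submodule import; older SciPy does not expose it via "import scipy"


def idl_tabulate(x, f, p=5):
    """Integrate samples f(x) by applying Newton-Cotes weights to
    successive blocks of at most p points (IDL INT_TABULATED style)."""

    def newton_cotes(x, f):
        if x.shape[0] < 2:
            return 0
        # Relative sample positions: first point at 0, last at N
        rn = (x.shape[0] - 1) * (x - x[0]) / (x[-1] - x[0])
        weights = scipy.integrate.newton_cotes(rn)[0]
        return (x[-1] - x[0]) / (x.shape[0] - 1) * np.dot(weights, f)

    ret = 0
    for idx in range(0, x.shape[0], p - 1):
        ret += newton_cotes(x[idx : idx + p], f[idx : idx + p])
    return ret


x = np.linspace(0.0, np.pi, 41)
print(idl_tabulate(x, np.sin(x)))  # should print a value very close to 2.0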