diff --git a/pyproject.toml b/pyproject.toml
index 6259b77..2f83530 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,7 +43,6 @@ dynamic = ["version"]
 [project.optional-dependencies]
 tests = [
     "pytest",
-    "pytest-cov",
 ]
 docs = [
     "sphinx>=3.4,<5",
@@ -57,6 +56,3 @@ version = { attr = "setuptools_scm.get_version" }
 
 [tool.setuptools_scm]
 write_to = "src/boutdata/_version.py"
-
-[tool.pytest.ini_options]
-addopts = "--cov=boutdata"
diff --git a/src/boutdata/squashoutput.py b/src/boutdata/squashoutput.py
index e32a7e4..20ff905 100644
--- a/src/boutdata/squashoutput.py
+++ b/src/boutdata/squashoutput.py
@@ -110,7 +110,6 @@ def squashoutput(
         is used.
     """
     # use local imports to allow fast import for tab-completion
-    import gc
     import glob
     import os
     import shutil
@@ -240,7 +239,6 @@ def squashoutput(
 
             f.write(varname, var)
             var = None
-            gc.collect()
 
     # Copy file attributes
     for attrname in outputs.list_file_attributes():
@@ -254,7 +252,6 @@
 
     f.close()
     del outputs
-    gc.collect()
 
     if delete:
         if append:
diff --git a/src/boutdata/tests/make_test_data.py b/src/boutdata/tests/make_test_data.py
index f8fe0a5..29d7ad4 100644
--- a/src/boutdata/tests/make_test_data.py
+++ b/src/boutdata/tests/make_test_data.py
@@ -3,13 +3,13 @@
 import numpy as np
 from netCDF4 import Dataset
 
-field3d_t_list = ["field3d_t_1", "field3d_t_2"]
-field3d_list = ["field3d_1", "field3d_2"]
-field2d_t_list = ["field2d_t_1", "field2d_t_2"]
-field2d_list = ["field2d_1", "field2d_2"]
-fieldperp_t_list = ["fieldperp_t_1", "fieldperp_t_2"]
-fieldperp_list = ["fieldperp_1", "fieldperp_2"]
-scalar_t_list = ["t_array", "scalar_t_1", "scalar_t_2"]
+field3d_t_list = ["field3d_t_1"]
+field3d_list = ["field3d_1"]
+field2d_t_list = ["field2d_t_1"]
+field2d_list = ["field2d_1"]
+fieldperp_t_list = ["fieldperp_t_1"]
+fieldperp_list = ["fieldperp_1"]
+scalar_t_list = ["t_array", "scalar_t_1"]
 
 # Note "yindex_global" attribute not included here for FieldPerps, because it is handled
 # specially
@@ -19,61 +19,31 @@
         "direction_y": "Standard",
         "direction_z": "Standard",
     },
-    "field3d_t_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Standard",
-    },
     "field3d_1": {
         "cell_location": "CELL_CENTRE",
         "direction_y": "Standard",
         "direction_z": "Standard",
     },
-    "field3d_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Standard",
-    },
     "field2d_t_1": {
         "cell_location": "CELL_CENTRE",
         "direction_y": "Standard",
         "direction_z": "Average",
     },
-    "field2d_t_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Average",
-    },
     "field2d_1": {
         "cell_location": "CELL_CENTRE",
         "direction_y": "Standard",
         "direction_z": "Average",
     },
-    "field2d_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Average",
-    },
     "fieldperp_t_1": {
         "cell_location": "CELL_CENTRE",
         "direction_y": "Standard",
         "direction_z": "Standard",
     },
-    "fieldperp_t_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Standard",
-    },
     "fieldperp_1": {
         "cell_location": "CELL_CENTRE",
         "direction_y": "Standard",
         "direction_z": "Standard",
     },
-    "fieldperp_2": {
-        "cell_location": "CELL_CENTRE",
-        "direction_y": "Standard",
-        "direction_z": "Standard",
-    },
 }
 
 expected_file_attributes = {
@@ -228,7 +198,6 @@ def create3D_t(name):
         result[name] = data[:, xslice, yslice, zslice]
 
     create3D_t("field3d_t_1")
-    create3D_t("field3d_t_2")
 
     def create3D(name):
         var = outputfile.createVariable(name, float, ("x", "y", "z"))
@@ -241,7 +210,6 @@ def create3D(name):
         result[name] = data[xslice, yslice, zslice]
 
     create3D("field3d_1")
-    create3D("field3d_2")
 
     # Field2D
     def create2D_t(name):
@@ -255,7 +223,6 @@ def create2D_t(name):
         result[name] = data[:, xslice, yslice]
 
     create2D_t("field2d_t_1")
-    create2D_t("field2d_t_2")
 
     def create2D(name):
         var = outputfile.createVariable(name, float, ("x", "y"))
@@ -268,7 +235,6 @@ def create2D(name):
         result[name] = data[xslice, yslice]
 
     create2D("field2d_1")
-    create2D("field2d_2")
 
     # FieldPerp
     def createPerp_t(name):
@@ -283,7 +249,6 @@ def createPerp_t(name):
         result[name] = data[:, xslice, zslice]
 
     createPerp_t("fieldperp_t_1")
-    createPerp_t("fieldperp_t_2")
 
     def createPerp(name):
         var = outputfile.createVariable(name, float, ("x", "z"))
@@ -297,7 +262,6 @@ def createPerp(name):
         result[name] = data[xslice, zslice]
 
     createPerp("fieldperp_1")
-    createPerp("fieldperp_2")
 
     # Time-dependent array
     def createScalar_t(name):
@@ -310,7 +274,6 @@ def createScalar_t(name):
 
     createScalar_t("t_array")
     createScalar_t("scalar_t_1")
-    createScalar_t("scalar_t_2")
 
     # Scalar
     def createScalar(name, value):
@@ -388,7 +351,6 @@ def create3D(name):
         result[name] = data[xslice, yslice, zslice]
 
     create3D("field3d_1")
-    create3D("field3d_2")
 
     # Field2D
     def create2D(name):
@@ -402,7 +364,6 @@ def create2D(name):
         result[name] = data[xslice, yslice]
 
     create2D("field2d_1")
-    create2D("field2d_2")
 
     # FieldPerp
     def createPerp(name):
@@ -417,7 +378,6 @@ def createPerp(name):
         result[name] = data[xslice, zslice]
 
     createPerp("fieldperp_1")
-    createPerp("fieldperp_2")
 
     # Scalar
     def createScalar(name, value):
@@ -466,7 +426,7 @@ def concatenate_data(data_list, *, nxpe, fieldperp_yproc_ind, has_t_dim=True):
         raise ValueError("nxpe={} does not divide len(data_list)={}".format(nxpe, npes))
 
     if has_t_dim:
-        for var in ("field3d_t_1", "field3d_t_2", "field2d_t_1", "field2d_t_2"):
+        for var in ("field3d_t_1", "field2d_t_1"):
             # Join in x-direction
             parts = [
                 np.concatenate(
@@ -477,7 +437,7 @@ def concatenate_data(data_list, *, nxpe, fieldperp_yproc_ind, has_t_dim=True):
             # Join in y-direction
             result[var] = np.concatenate(parts, axis=2)
 
-    for var in ("field3d_1", "field3d_2", "field2d_1", "field2d_2"):
+    for var in ("field3d_1", "field2d_1"):
         # Join in x-direction
         parts = [
             np.concatenate(
@@ -489,7 +449,7 @@ def concatenate_data(data_list, *, nxpe, fieldperp_yproc_ind, has_t_dim=True):
         result[var] = np.concatenate(parts, axis=1)
 
     if has_t_dim:
-        for var in ("fieldperp_t_1", "fieldperp_t_2"):
+        for var in ("fieldperp_t_1",):
             # Join in x-direction
             result[var] = np.concatenate(
                 [
@@ -501,7 +461,7 @@ def concatenate_data(data_list, *, nxpe, fieldperp_yproc_ind, has_t_dim=True):
                 axis=1,
             )
 
-    for var in ("fieldperp_1", "fieldperp_2"):
+    for var in ("fieldperp_1",):
         # Join in x-direction
         result[var] = np.concatenate(
             [
diff --git a/src/boutdata/tests/test_collect.py b/src/boutdata/tests/test_collect.py
index e349314..e6f3b2d 100644
--- a/src/boutdata/tests/test_collect.py
+++ b/src/boutdata/tests/test_collect.py
@@ -1,3 +1,4 @@
+import copy
 from glob import glob
 from pathlib import Path
 
@@ -23,6 +24,11 @@
 # Note - using tmp_path fixture requires pytest>=3.9.0
 
 collect_kwargs_list = [
+    pytest.param({"xguards": True, "yguards": "include_upper"}, id="collect_guards"),
+    pytest.param({"xguards": False, "yguards": False}, id="collect_noguards"),
+]
+
+collect_kwargs_list_full = [
     {"xguards": True, "yguards": "include_upper"},
     {"xguards": False, "yguards": "include_upper"},
     {"xguards": True, "yguards": True},
@@ -31,15 +37,16 @@
     {"xguards": False, "yguards": False},
 ]
 
+
 squash_params_list = [
-    (False, {}),
-    (True, {}),
-    (True, {"parallel": 2}),
+    pytest.param((False, {}), id="squash_off"),
+    pytest.param((True, {}), id="squash_serial"),
+    pytest.param((True, {"parallel": 2}), id="squash_parallel"),
 ]
 
 
 def check_collected_data(
-    expected,
+    expected_in,
     *,
     fieldperp_global_yind,
     doublenull,
@@ -69,6 +76,8 @@ def check_collected_data(
     squash_kwargs : dict, optional
         Keyword arguments passed to `squashoutput()`.
     """
+    expected = copy.deepcopy(expected_in)
+
     # Apply effect of arguments to expected data
     if not collect_kwargs["xguards"]:
         remove_xboundaries(expected, expected["MXG"])
@@ -115,6 +124,599 @@ def check_collected_data(
         assert f.read_file_attribute(attrname) == attr
 
 
+def symlink_dump_files(src: Path, dst: Path):
+    """Symlink all dump files from ``src`` directory into ``dst``"""
+    for f in src.glob("*.nc"):
+        (dst / f.name).symlink_to(f)
+
+
+def create_dump_file_set(
+    grid_info, fieldperp_global_yind, tmp_path, rng, dump_params, fieldperp_yproc_ind=0
+):
+    """Create a set of dump files based on ``dump_params`` and return the concatenated data"""
+
+    dumps = []
+    for i, boundaries, fieldperp_yind in dump_params:
+        dumps.append(
+            create_dump_file(
+                tmpdir=tmp_path,
+                rng=rng,
+                grid_info=grid_info,
+                i=i,
+                boundaries=boundaries,
+                fieldperp_global_yind=fieldperp_yind,
+            )
+        )
+
+    expected = concatenate_data(
+        dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind
+    )
+    return expected
+
+
+@pytest.fixture(scope="module")
+def core_min(tmp_path_factory):
+    """Check output from a core-only case using the minimum number of processes"""
+
+    tmp_path = tmp_path_factory.getbasetemp() / "core_min"
+    tmp_path.mkdir(parents=True, exist_ok=True)
+
+    fieldperp_global_yind = 3
+
+    # core
+    # core includes "ylower" and "yupper" even though there is no actual y-boundary
+    # because collect/squashoutput collect these points
+    dump_params = [
+        (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind),
+    ]
+    expected = create_dump_file_set(
+        make_grid_info(),
+        fieldperp_global_yind,
+        tmp_path,
+        np.random.default_rng(100),
+        dump_params,
+    )
+
+    return tmp_path, expected, fieldperp_global_yind
+
+
+@pytest.fixture(scope="module")
+def core_full(tmp_path_factory):
+    """
+    Check output from a core-only case using a large number of processes. 'Large'
+    means there is at least one process in each region with no edges touching
+    another region.
+ """ + tmp_path = tmp_path_factory.getbasetemp() / "core_full" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 3 + + # core + # core includes "ylower" and "yupper" even though there is no actual y-boundary + # because collect/squashoutput collect these points + dump_params = [ + (0, ["xinner", "ylower"], fieldperp_global_yind), + (1, ["ylower"], fieldperp_global_yind), + (2, ["xouter", "ylower"], fieldperp_global_yind), + (3, ["xinner"], -1), + (4, [], -1), + (5, ["xouter"], -1), + (6, ["xinner", "yupper"], -1), + (7, ["yupper"], -1), + (8, ["xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nxpe=3, nype=3), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(101), + dump_params, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def sol_min(tmp_path_factory): + """Check output from a SOL-only case using the minimum number of processes""" + + tmp_path = tmp_path_factory.getbasetemp() / "sol_min" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 3 + + # SOL + dump_params = [ + (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), + ] + expected = create_dump_file_set( + make_grid_info(ixseps1=0, ixseps2=0), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(102), + dump_params, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def sol_full(tmp_path_factory): + """ + Check output from a SOL-only case using a large number of processes. 'Large' + means there is at least one process in each region with no edges touching + another region. + """ + tmp_path = tmp_path_factory.getbasetemp() / "sol_full" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 3 + + # SOL + dump_params = [ + (0, ["xinner", "ylower"], fieldperp_global_yind), + (1, ["ylower"], fieldperp_global_yind), + (2, ["xouter", "ylower"], fieldperp_global_yind), + (3, ["xinner"], -1), + (4, [], -1), + (5, ["xouter"], -1), + (6, ["xinner", "yupper"], -1), + (7, ["yupper"], -1), + (8, ["xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nxpe=3, nype=3, ixseps1=0, ixseps2=0), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(103), + dump_params, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def single_null_min(tmp_path_factory): + """Check output from a single-null case using the minimum number of processes""" + + tmp_path = tmp_path_factory.getbasetemp() / "single_null_min" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 7 + + dump_params = [ + # inner divertor leg + (0, ["xinner", "xouter", "ylower"], -1), + # core + (1, ["xinner", "xouter"], fieldperp_global_yind), + # outer divertor leg + (2, ["xinner", "xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nype=3, ixseps1=4, xpoints=1), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(104), + dump_params, + fieldperp_yproc_ind=1, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def single_null_lower_boundary(tmp_path_factory): + """ + Check output from a single-null case using the minimum number of processes. This + test puts the FieldPerp in the lower boundary. 
+ """ + + tmp_path = tmp_path_factory.getbasetemp() / "single_null_lower_boundary" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 1 + + dump_params = [ + # inner divertor leg + (0, ["xinner", "xouter", "ylower"], fieldperp_global_yind), + # core + (1, ["xinner", "xouter"], -1), + # outer divertor leg + (2, ["xinner", "xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nype=3, ixseps1=4, xpoints=1), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(104), + dump_params, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def single_null_upper_boundary(tmp_path_factory): + """ + Check output from a single-null case using the minimum number of processes. This + test puts the FieldPerp in the upper boundary. + """ + + tmp_path = tmp_path_factory.getbasetemp() / "single_null_upper_boundary" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 14 + + dump_params = [ + # inner divertor leg + (0, ["xinner", "xouter", "ylower"], -1), + # core + (1, ["xinner", "xouter"], -1), + # outer divertor leg + (2, ["xinner", "xouter", "yupper"], fieldperp_global_yind), + ] + expected = create_dump_file_set( + make_grid_info(nype=3, ixseps1=4, xpoints=1), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(104), + dump_params, + fieldperp_yproc_ind=2, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def single_null_inconsistent(tmp_path_factory): + """ + Check output from a single-null case using the minimum number of processes. This + test has FieldPerps created with inconsistent y-indices to check this produces + an error. + """ + + tmp_path = tmp_path_factory.getbasetemp() / "single_null_inconsistent" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 7 + + dump_params = [ + # inner divertor leg + (0, ["xinner", "xouter", "ylower"], 2), + # core + (1, ["xinner", "xouter"], 7), + # outer divertor leg + (2, ["xinner", "xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nype=3, ixseps1=4, xpoints=1), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(104), + dump_params, + fieldperp_yproc_ind=1, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def single_null_full(tmp_path_factory): + """ + Check output from a single-null case using a large number of processes. 'Large' + means there is at least one process in each region with no edges touching + another region. 
+ """ + tmp_path = tmp_path_factory.getbasetemp() / "single_null_full" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 19 + + dump_params = [ + # inner divertor leg + (0, ["xinner", "ylower"], -1), + (1, ["ylower"], -1), + (2, ["xouter", "ylower"], -1), + (3, ["xinner"], -1), + (4, [], -1), + (5, ["xouter"], -1), + (6, ["xinner"], -1), + (7, [], -1), + (8, ["xouter"], -1), + # core + (9, ["xinner"], -1), + (10, [], -1), + (11, ["xouter"], -1), + (12, ["xinner"], fieldperp_global_yind), + (13, [], fieldperp_global_yind), + (14, ["xouter"], fieldperp_global_yind), + (15, ["xinner"], -1), + (16, [], -1), + (17, ["xouter"], -1), + # outer divertor leg + (18, ["xinner"], -1), + (19, [], -1), + (20, ["xouter"], -1), + (21, ["xinner"], -1), + (22, [], -1), + (23, ["xouter"], -1), + (24, ["xinner", "yupper"], -1), + (25, ["yupper"], -1), + (26, ["xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nxpe=3, nype=9, ixseps1=7, xpoints=1), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(105), + dump_params, + fieldperp_yproc_ind=4, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def connected_double_null_min(tmp_path_factory): + """Check output from a connected double-null case using the minimum number of processes""" + + tmp_path = tmp_path_factory.getbasetemp() / "connected_double_null_min" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 7 + + dump_params = [ + # inner, lower divertor leg + (0, ["xinner", "xouter", "ylower"], -1), + # inner core + (1, ["xinner", "xouter"], fieldperp_global_yind), + # inner, upper divertor leg + (2, ["xinner", "xouter", "yupper"], -1), + # outer, upper divertor leg + (3, ["xinner", "xouter", "ylower"], -1), + # outer core + (4, ["xinner", "xouter"], -1), + # outer, lower divertor leg + (5, ["xinner", "xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nype=6, ixseps1=4, ixseps2=4, xpoints=2), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(107), + dump_params, + fieldperp_yproc_ind=1, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def connected_double_null_full(tmp_path_factory): + """ + Check output from a connected double-null case using a large number of + processes. 'Large' means there is at least one process in each region with no + edges touching another region. 
+ """ + + tmp_path = tmp_path_factory.getbasetemp() / "connected_double_null_full" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 19 + + dump_params = [ + # inner, lower divertor leg + (0, ["xinner", "ylower"], -1), + (1, ["ylower"], -1), + (2, ["xouter", "ylower"], -1), + (3, ["xinner"], -1), + (4, [], -1), + (5, ["xouter"], -1), + (6, ["xinner"], -1), + (7, [], -1), + (8, ["xouter"], -1), + # inner core + (9, ["xinner"], -1), + (10, [], -1), + (11, ["xouter"], -1), + (12, ["xinner"], fieldperp_global_yind), + (13, [], fieldperp_global_yind), + (14, ["xouter"], fieldperp_global_yind), + (15, ["xinner"], -1), + (16, [], -1), + (17, ["xouter"], -1), + # inner, upper divertor leg + (18, ["xinner"], -1), + (19, [], -1), + (20, ["xouter"], -1), + (21, ["xinner"], -1), + (22, [], -1), + (23, ["xouter"], -1), + (24, ["xinner", "yupper"], -1), + (25, ["yupper"], -1), + (26, ["xouter", "yupper"], -1), + # outer, upper divertor leg + (27, ["xinner", "ylower"], -1), + (28, ["ylower"], -1), + (29, ["xouter", "ylower"], -1), + (30, ["xinner"], -1), + (31, [], -1), + (32, ["xouter"], -1), + (33, ["xinner"], -1), + (34, [], -1), + (35, ["xouter"], -1), + # outer core + (36, ["xinner"], -1), + (37, [], -1), + (38, ["xouter"], -1), + (39, ["xinner"], -1), + (40, [], -1), + (41, ["xouter"], -1), + (42, ["xinner"], -1), + (43, [], -1), + (44, ["xouter"], -1), + # outer, lower divertor leg + (45, ["xinner"], -1), + (46, [], -1), + (47, ["xouter"], -1), + (48, ["xinner"], -1), + (49, [], -1), + (50, ["xouter"], -1), + (51, ["xinner", "yupper"], -1), + (52, ["yupper"], -1), + (53, ["xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nxpe=3, nype=18, ixseps1=7, ixseps2=7, xpoints=2), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(108), + dump_params, + fieldperp_yproc_ind=4, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def disconnected_double_null_min(tmp_path_factory): + """Check output from a disconnected double-null case using the minimum number of processes""" + + tmp_path = tmp_path_factory.getbasetemp() / "disconnected_double_null_min" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 7 + + dump_params = [ + # inner, lower divertor leg + (0, ["xinner", "xouter", "ylower"], -1), + # inner core + (1, ["xinner", "xouter"], fieldperp_global_yind), + # inner, upper divertor leg + (2, ["xinner", "xouter", "yupper"], -1), + # outer, upper divertor leg + (3, ["xinner", "xouter", "ylower"], -1), + # outer core + (4, ["xinner", "xouter"], -1), + # outer, lower divertor leg + (5, ["xinner", "xouter", "yupper"], -1), + ] + expected = create_dump_file_set( + make_grid_info(nype=6, ixseps1=3, ixseps2=5, xpoints=2), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(109), + dump_params, + fieldperp_yproc_ind=1, + ) + return tmp_path, expected, fieldperp_global_yind + + +disconnected_double_null_params = [ + # inner, lower divertor leg + (0, ["xinner", "ylower"], -1), + (1, ["ylower"], -1), + (2, ["xouter", "ylower"], -1), + (3, ["xinner"], -1), + (4, [], -1), + (5, ["xouter"], -1), + (6, ["xinner"], -1), + (7, [], -1), + (8, ["xouter"], -1), + # inner core + (9, ["xinner"], -1), + (10, [], -1), + (11, ["xouter"], -1), + (12, ["xinner"], 19), + (13, [], 19), + (14, ["xouter"], 19), + (15, ["xinner"], -1), + (16, [], -1), + (17, ["xouter"], -1), + # inner, upper divertor leg + (18, ["xinner"], -1), + (19, [], -1), + (20, ["xouter"], -1), + (21, ["xinner"], -1), + (22, [], 
-1), + (23, ["xouter"], -1), + (24, ["xinner", "yupper"], -1), + (25, ["yupper"], -1), + (26, ["xouter", "yupper"], -1), + # outer, upper divertor leg + (27, ["xinner", "ylower"], -1), + (28, ["ylower"], -1), + (29, ["xouter", "ylower"], -1), + (30, ["xinner"], -1), + (31, [], -1), + (32, ["xouter"], -1), + (33, ["xinner"], -1), + (34, [], -1), + (35, ["xouter"], -1), + # outer core + (36, ["xinner"], -1), + (37, [], -1), + (38, ["xouter"], -1), + (39, ["xinner"], -1), + (40, [], -1), + (41, ["xouter"], -1), + (42, ["xinner"], -1), + (43, [], -1), + (44, ["xouter"], -1), + # outer, lower divertor leg + (45, ["xinner"], -1), + (46, [], -1), + (47, ["xouter"], -1), + (48, ["xinner"], -1), + (49, [], -1), + (50, ["xouter"], -1), + (51, ["xinner", "yupper"], -1), + (52, ["yupper"], -1), + (53, ["xouter", "yupper"], -1), +] + + +@pytest.fixture(scope="module") +def disconnected_double_null_full(tmp_path_factory): + """Check output from a disconnected double-null case using a large number of + processes. 'Large' means there is at least one process in each region with + no edges touching another region. + + """ + + tmp_path = tmp_path_factory.getbasetemp() / "disconnected_double_null_full" + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 19 + + expected = create_dump_file_set( + make_grid_info(nxpe=3, nype=18, ixseps1=6, ixseps2=11, xpoints=2), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(110), + disconnected_double_null_params, + fieldperp_yproc_ind=4, + ) + return tmp_path, expected, fieldperp_global_yind + + +@pytest.fixture(scope="module") +def disconnected_double_null_full_no_guards(tmp_path_factory): + """Check output from a disconnected double-null case using a large number of + processes. 'Large' means there is at least one process in each region with + no edges touching another region. 
This case has no guard cells + + """ + + tmp_path = ( + tmp_path_factory.getbasetemp() / "disconnected_double_null_full_no_guards" + ) + tmp_path.mkdir(parents=True, exist_ok=True) + + fieldperp_global_yind = 19 + + expected = create_dump_file_set( + make_grid_info(mxg=0, myg=0, nxpe=3, nype=18, ixseps1=6, ixseps2=11, xpoints=2), + fieldperp_global_yind, + tmp_path, + np.random.default_rng(110), + disconnected_double_null_params, + fieldperp_yproc_ind=4, + ) + return tmp_path, expected, fieldperp_global_yind + + def check_variable( varname, actual, expected_data, expected_attributes, fieldperp_global_yind ): @@ -134,7 +736,7 @@ def check_variable( fieldperp_global_yind : int Global y-index where FieldPerps have been created """ - npt.assert_array_equal(expected_data, actual) + npt.assert_array_equal(expected_data, actual, err_msg=varname) actual_keys = list(actual.attributes.keys()) if expected_attributes is not None: for a in expected_attributes: @@ -165,49 +767,39 @@ def check_variable( assert actual.attributes["bout_type"] == "scalar" +scenario_list = [ + "core_min", + "core_full", + "sol_min", + "sol_full", + "single_null_min", + "single_null_lower_boundary", + "single_null_upper_boundary", + "single_null_full", + "connected_double_null_min", + "connected_double_null_full", + "disconnected_double_null_min", + "disconnected_double_null_full", + "disconnected_double_null_full_no_guards", +] + + class TestCollect: @pytest.mark.parametrize("squash_params", squash_params_list) @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_core_min_files(self, tmp_path, squash_params, collect_kwargs): - """ - Check output from a core-only case using the minimum number of processes - """ + @pytest.mark.parametrize("scenario", scenario_list) + def test_tokamak_scenario_collect( + self, tmp_path, scenario, collect_kwargs, squash_params, request + ): + """Test basic collect in different scenarios""" squash, squash_kwargs = squash_params - - grid_info = make_grid_info() - - fieldperp_global_yind = 3 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(100) - - # core - # core includes "ylower" and "yupper" even though there is no actual y-boundary - # because collect/squashoutput collect these points - dump_params = [ - (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) + data_path, expected, fieldperp_global_yind = request.getfixturevalue(scenario) + symlink_dump_files(data_path, tmp_path) check_collected_data( expected, fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, + doublenull="double_null" in scenario, path=tmp_path, squash=squash, collect_kwargs=collect_kwargs, @@ -228,11 +820,15 @@ def test_core_min_files(self, tmp_path, squash_params, collect_kwargs): (7, None), ], ) - def test_core_min_files_existing_squash_file_raises(self, tmp_path, time_split): + def test_core_min_files_existing_squash_file_raises( + self, core_min, tmp_path, time_split + ): """ Check output from a core-only case using the minimum number of processes """ time_split_size, time_split_first_label = time_split + data_path, _, _ = core_min + symlink_dump_files(data_path, tmp_path) squash_kwargs = {} if time_split_size is not 
None: @@ -240,28 +836,6 @@ def test_core_min_files_existing_squash_file_raises(self, tmp_path, time_split): if time_split_first_label is not None: squash_kwargs["time_split_first_label"] = time_split_first_label - grid_info = make_grid_info() - - fieldperp_global_yind = 3 - - rng = np.random.default_rng(100) - - # core - # core includes "ylower" and "yupper" even though there is no actual y-boundary - # because collect/squashoutput collect these points - dump_params = [ - (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), - ] - for i, boundaries, fieldperp_yind in dump_params: - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - if time_split_size is None: filenames = ["boutdata.nc"] else: @@ -290,55 +864,24 @@ def test_core_min_files_existing_squash_file_raises(self, tmp_path, time_split): "time_split", [ (1, None), - (2, None), (2, 3), - (3, None), - (4, None), (5, None), - (6, None), (7, None), ], ) @pytest.mark.parametrize("parallel", [False, 2]) - def test_core_min_files_time_split(self, tmp_path, time_split, parallel): + def test_core_min_files_time_split(self, core_min, tmp_path, time_split, parallel): """ Check output from a core-only case using the minimum number of processes """ + data_path, expected, fieldperp_global_yind = core_min + symlink_dump_files(data_path, tmp_path) + collect_kwargs = {"xguards": True, "yguards": "include_upper"} squash_kwargs = {"time_split_size": time_split[0], "parallel": parallel} if time_split[1] is not None: squash_kwargs["time_split_first_label"] = time_split[1] - grid_info = make_grid_info() - - fieldperp_global_yind = 3 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(100) - - # core - # core includes "ylower" and "yupper" even though there is no actual y-boundary - # because collect/squashoutput collect these points - dump_params = [ - (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - # Copy of check_collected_data code, modified to test series of output # files created when setting time_split_size ###################################################################### @@ -384,35 +927,16 @@ def test_core_min_files_time_split(self, tmp_path, time_split, parallel): fieldperp_global_yind, ) - def test_core_min_files_append_time_split_raises(self, tmp_path): + def test_core_min_files_append_time_split_raises(self, core_min, tmp_path): """ Check output from a core-only case using the minimum number of processes """ + data_path, expected, fieldperp_global_yind = core_min + symlink_dump_files(data_path, tmp_path) + collect_kwargs = {"xguards": True, "yguards": "include_upper"} squash_kwargs = {"time_split_size": 2, "append": True} - grid_info = make_grid_info() - - fieldperp_global_yind = 3 - - rng = np.random.default_rng(100) - - # core - # core includes "ylower" and "yupper" even though there is no actual y-boundary - # because collect/squashoutput collect these points - dump_params = [ - (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), - ] - for i, boundaries, fieldperp_yind in dump_params: - create_dump_file( - tmpdir=tmp_path, - rng=rng, - 
grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - with pytest.raises( ValueError, match="'time_split_size' is not compatible with append=True" ): @@ -421,537 +945,53 @@ def test_core_min_files_append_time_split_raises(self, tmp_path): ) @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_core(self, tmp_path, squash_params, collect_kwargs): + def test_singlenull_min_files_fieldperp_on_two_yproc_different_index( + self, single_null_inconsistent, tmp_path, squash_params + ): """ - Check output from a core-only case using a large number of processes. 'Large' - means there is at least one process in each region with no edges touching - another region. + Check output from a single-null case using the minimum number of processes. This + test has FieldPerps created with inconsistent y-indices to check this produces + an error. """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nxpe=3, nype=3) - - fieldperp_global_yind = 3 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(101) - - # core - # core includes "ylower" and "yupper" even though there is no actual y-boundary - # because collect/squashoutput collect these points - dump_params = [ - (0, ["xinner", "ylower"], fieldperp_global_yind), - (1, ["ylower"], fieldperp_global_yind), - (2, ["xouter", "ylower"], fieldperp_global_yind), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner", "yupper"], -1), - (7, ["yupper"], -1), - (8, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) + data_path, expected, fieldperp_global_yind = single_null_inconsistent + symlink_dump_files(data_path, tmp_path) - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_sol_min_files(self, tmp_path, squash_params, collect_kwargs): - """ - Check output from a SOL-only case using the minimum number of processes - """ squash, squash_kwargs = squash_params + collect_kwargs = {"xguards": True, "yguards": "include_upper"} - grid_info = make_grid_info(ixseps1=0, ixseps2=0) - - fieldperp_global_yind = 3 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(102) - - # SOL - dump_params = [ - (0, ["xinner", "xouter", "ylower", "yupper"], fieldperp_global_yind), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) + with pytest.raises(ValueError, match="Found FieldPerp"): + check_collected_data( + expected, + fieldperp_global_yind=fieldperp_global_yind, + doublenull=False, + path=tmp_path, + squash=squash, + collect_kwargs=collect_kwargs, + squash_kwargs=squash_kwargs, ) - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - 
expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_sol(self, tmp_path, squash_params, collect_kwargs): + @pytest.mark.parametrize( + "squash_kwargs", + ({"parallel": 1}, {"parallel": 8}, {"parallel": True}), + ) + def test_singlenull_squashoutput_np( + self, single_null_full, tmp_path, squash_kwargs + ): """ - Check output from a SOL-only case using a large number of processes. 'Large' + Check output from a single-null case using a large number of processes. 'Large' means there is at least one process in each region with no edges touching another region. """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nxpe=3, nype=3, ixseps1=0, ixseps2=0) - - fieldperp_global_yind = 3 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(103) - - # SOL - dump_params = [ - (0, ["xinner", "ylower"], fieldperp_global_yind), - (1, ["ylower"], fieldperp_global_yind), - (2, ["xouter", "ylower"], fieldperp_global_yind), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner", "yupper"], -1), - (7, ["yupper"], -1), - (8, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) + data_path, expected, fieldperp_global_yind = single_null_full + symlink_dump_files(data_path, tmp_path) check_collected_data( expected, fieldperp_global_yind=fieldperp_global_yind, doublenull=False, path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_singlenull_min_files(self, tmp_path, squash_params, collect_kwargs): - """ - Check output from a single-null case using the minimum number of processes - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nype=3, ixseps1=4, xpoints=1) - - fieldperp_global_yind = 7 - fieldperp_yproc_ind = 1 - - rng = np.random.default_rng(104) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "xouter", "ylower"], -1), - # core - (1, ["xinner", "xouter"], fieldperp_global_yind), - # outer divertor leg - (2, ["xinner", "xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_singlenull_min_files_lower_boundary_fieldperp( - self, tmp_path, squash_params, collect_kwargs - ): - """ - Check output from a 
single-null case using the minimum number of processes. This - test puts the FieldPerp in the lower boundary. - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nype=3, ixseps1=4, xpoints=1) - - fieldperp_global_yind = 1 - fieldperp_yproc_ind = 0 - - rng = np.random.default_rng(104) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "xouter", "ylower"], fieldperp_global_yind), - # core - (1, ["xinner", "xouter"], -1), - # outer divertor leg - (2, ["xinner", "xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_singlenull_min_files_upper_boundary_fieldperp( - self, tmp_path, squash_params, collect_kwargs - ): - """ - Check output from a single-null case using the minimum number of processes. This - test puts the FieldPerp in the upper boundary. - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nype=3, ixseps1=4, xpoints=1) - - fieldperp_global_yind = 14 - fieldperp_yproc_ind = 2 - - rng = np.random.default_rng(104) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "xouter", "ylower"], -1), - # core - (1, ["xinner", "xouter"], -1), - # outer divertor leg - (2, ["xinner", "xouter", "yupper"], fieldperp_global_yind), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - def test_singlenull_min_files_fieldperp_on_two_yproc_different_index( - self, tmp_path, squash_params - ): - """ - Check output from a single-null case using the minimum number of processes. This - test has FieldPerps created with inconsistent y-indices to check this produces - an error. 
- """ - squash, squash_kwargs = squash_params - - collect_kwargs = {"xguards": True, "yguards": "include_upper"} - - grid_info = make_grid_info(nype=3, ixseps1=4, xpoints=1) - - fieldperp_global_yind = 7 - fieldperp_yproc_ind = 1 - - rng = np.random.default_rng(104) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "xouter", "ylower"], 2), - # core - (1, ["xinner", "xouter"], 7), - # outer divertor leg - (2, ["xinner", "xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - with pytest.raises(ValueError, match="Found FieldPerp"): - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_singlenull(self, tmp_path, squash_params, collect_kwargs): - """ - Check output from a single-null case using a large number of processes. 'Large' - means there is at least one process in each region with no edges touching - another region. - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nxpe=3, nype=9, ixseps1=7, xpoints=1) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(105) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # outer divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize( - "squash_kwargs", - ( - # {"parallel": False}, - {"parallel": 1}, - {"parallel": 2}, - {"parallel": 3}, - {"parallel": 4}, - {"parallel": 5}, - {"parallel": 6}, - {"parallel": 7}, - {"parallel": 8}, - {"parallel": True}, - ), - ) - def test_singlenull_squashoutput_np(self, tmp_path, squash_kwargs): - """ - Check output from a single-null case using a large number of processes. 'Large' - means there is at least one process in each region with no edges touching - another region. 
- """ - grid_info = make_grid_info(nxpe=3, nype=9, ixseps1=7, xpoints=1) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(105) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # outer divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=False, - path=tmp_path, - squash=True, - collect_kwargs={"xguards": True, "yguards": "include_upper"}, + squash=True, + collect_kwargs={"xguards": True, "yguards": "include_upper"}, squash_kwargs=squash_kwargs, ) @@ -1101,140 +1141,6 @@ def test_singlenull_squashoutput_np(self, tmp_path, squash_kwargs): (None, slice(None)), (None, slice(None)), ), - # y-slicing - ( - (None, slice(None)), - (None, slice(None)), - (17, slice(17, 18)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (slice(30), slice(30)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - ([0, 28], slice(29)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (slice(5, None), slice(5, None)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - ([6, -1], slice(6, None)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (slice(7, 28), slice(7, 28)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - ([8, 27], slice(8, 28)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (slice(None, None, 5), slice(None, None, 5)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - ([0, -1, 6], slice(None, -1, 6)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (slice(9, 26, 7), slice(9, 26, 7)), - (None, slice(None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - ([5, 33, 4], slice(5, 33, 4)), - (None, slice(None)), - ), - # z-slicing - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - (1, slice(1, 2)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - (slice(3), slice(3)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - ([0, 2], slice(3)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - (slice(1, None), slice(1, None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - ([1, -1], slice(1, None)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - 
(slice(1, 3), slice(1, 3)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - ([1, 2], slice(1, 3)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - (slice(None, None, 2), slice(None, None, 2)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - ([0, -1, 2], slice(None, -1, 2)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - (slice(1, 4, 2), slice(1, 4, 2)), - ), - ( - (None, slice(None)), - (None, slice(None)), - (None, slice(None)), - ([1, 3, 2], slice(1, 3, 2)), - ), # combined slicing ((2, slice(2, 3)), (7, slice(7, 8)), (17, slice(17, 18)), (1, slice(1, 2))), ( @@ -1300,7 +1206,7 @@ def test_singlenull_squashoutput_np(self, tmp_path, squash_kwargs): ], ) def test_singlenull_tind_xind_yind_zind( - self, tmp_path, squash_params, tind, xind, yind, zind + self, single_null_full, tmp_path, squash_params, tind, xind, yind, zind ): """ Check output from a single-null case using a large number of processes. 'Large' @@ -1308,6 +1214,9 @@ def test_singlenull_tind_xind_yind_zind( another region. This test checks the 'tind', 'xind', 'yind' and 'zind' arguments to `collect()` and `squashoutput()`. """ + data_path, expected_original, fieldperp_global_yind = single_null_full + symlink_dump_files(data_path, tmp_path) + tind, tslice = tind xind, xslice = xind yind, yslice = yind @@ -1324,62 +1233,7 @@ def test_singlenull_tind_xind_yind_zind( "zind": zind, } - grid_info = make_grid_info(nxpe=3, nype=9, ixseps1=7, xpoints=1) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(106) - - dump_params = [ - # inner divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # outer divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - + expected = copy.deepcopy(expected_original) # Can only apply here (before effect of 'xguards' and 'yguards' is applied in # check_collected_data) because we keep 'xguards=True' and # 'yguards="include_upper"' for this test, so neither has an effect. 
@@ -1395,449 +1249,24 @@ def test_singlenull_tind_xind_yind_zind( squash_kwargs=squash_kwargs, ) - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_connected_doublenull_min_files( - self, tmp_path, squash_params, collect_kwargs - ): - """ - Check output from a connected double-null case using the minimum number of - processes - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nype=6, ixseps1=4, ixseps2=4, xpoints=2) - - fieldperp_global_yind = 7 - fieldperp_yproc_ind = 1 - - rng = np.random.default_rng(107) - - dump_params = [ - # inner, lower divertor leg - (0, ["xinner", "xouter", "ylower"], -1), - # inner core - (1, ["xinner", "xouter"], fieldperp_global_yind), - # inner, upper divertor leg - (2, ["xinner", "xouter", "yupper"], -1), - # outer, upper divertor leg - (3, ["xinner", "xouter", "ylower"], -1), - # outer core - (4, ["xinner", "xouter"], -1), - # outer, lower divertor leg - (5, ["xinner", "xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=True, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_connected_doublenull(self, tmp_path, squash_params, collect_kwargs): - """ - Check output from a connected double-null case using a large number of - processes. 'Large' means there is at least one process in each region with no - edges touching another region. 
- """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nxpe=3, nype=18, ixseps1=7, ixseps2=7, xpoints=2) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(108) - - dump_params = [ - # inner, lower divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # inner core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # inner, upper divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - # outer, upper divertor leg - (27, ["xinner", "ylower"], -1), - (28, ["ylower"], -1), - (29, ["xouter", "ylower"], -1), - (30, ["xinner"], -1), - (31, [], -1), - (32, ["xouter"], -1), - (33, ["xinner"], -1), - (34, [], -1), - (35, ["xouter"], -1), - # outer core - (36, ["xinner"], -1), - (37, [], -1), - (38, ["xouter"], -1), - (39, ["xinner"], -1), - (40, [], -1), - (41, ["xouter"], -1), - (42, ["xinner"], -1), - (43, [], -1), - (44, ["xouter"], -1), - # outer, lower divertor leg - (45, ["xinner"], -1), - (46, [], -1), - (47, ["xouter"], -1), - (48, ["xinner"], -1), - (49, [], -1), - (50, ["xouter"], -1), - (51, ["xinner", "yupper"], -1), - (52, ["yupper"], -1), - (53, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=True, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - def test_disconnected_doublenull_min_files( - self, tmp_path, squash_params, collect_kwargs - ): - """ - Check output from a disconnected double-null case using the minimum number of - processes - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info(nype=6, ixseps1=3, ixseps2=5, xpoints=2) - - fieldperp_global_yind = 7 - fieldperp_yproc_ind = 1 - - rng = np.random.default_rng(109) - - dump_params = [ - # inner, lower divertor leg - (0, ["xinner", "xouter", "ylower"], -1), - # inner core - (1, ["xinner", "xouter"], fieldperp_global_yind), - # inner, upper divertor leg - (2, ["xinner", "xouter", "yupper"], -1), - # outer, upper divertor leg - (3, ["xinner", "xouter", "ylower"], -1), - # outer core - (4, ["xinner", "xouter"], -1), - # outer, lower divertor leg - (5, ["xinner", "xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], 
fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=True, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - - @pytest.mark.parametrize("squash_params", squash_params_list) - @pytest.mark.parametrize("collect_kwargs", collect_kwargs_list) - @pytest.mark.parametrize("mxg", [0, 1, 2]) - @pytest.mark.parametrize("myg", [0, 1, 2]) - def test_disconnected_doublenull( - self, tmp_path, squash_params, collect_kwargs, mxg, myg - ): - """ - Check output from a disconnected double-null case using a large number of - processes. 'Large' means there is at least one process in each region with no - edges touching another region. - """ - squash, squash_kwargs = squash_params - - grid_info = make_grid_info( - mxg=mxg, myg=myg, nxpe=3, nype=18, ixseps1=6, ixseps2=11, xpoints=2 - ) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(110) - - dump_params = [ - # inner, lower divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # inner core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # inner, upper divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - # outer, upper divertor leg - (27, ["xinner", "ylower"], -1), - (28, ["ylower"], -1), - (29, ["xouter", "ylower"], -1), - (30, ["xinner"], -1), - (31, [], -1), - (32, ["xouter"], -1), - (33, ["xinner"], -1), - (34, [], -1), - (35, ["xouter"], -1), - # outer core - (36, ["xinner"], -1), - (37, [], -1), - (38, ["xouter"], -1), - (39, ["xinner"], -1), - (40, [], -1), - (41, ["xouter"], -1), - (42, ["xinner"], -1), - (43, [], -1), - (44, ["xouter"], -1), - # outer, lower divertor leg - (45, ["xinner"], -1), - (46, [], -1), - (47, ["xouter"], -1), - (48, ["xinner"], -1), - (49, [], -1), - (50, ["xouter"], -1), - (51, ["xinner", "yupper"], -1), - (52, ["yupper"], -1), - (53, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) - - check_collected_data( - expected, - fieldperp_global_yind=fieldperp_global_yind, - doublenull=True, - path=tmp_path, - squash=squash, - collect_kwargs=collect_kwargs, - squash_kwargs=squash_kwargs, - ) - @pytest.mark.parametrize( "squash_kwargs", [ - {}, {"compress": True, "complevel": 1}, - {"compress": True, "complevel": 2}, - {"compress": True, "complevel": 3}, - {"compress": True, "complevel": 4}, - {"compress": True, "complevel": 5}, - {"compress": True, "complevel": 5}, - {"compress": True, "complevel": 7}, - {"compress": True, "complevel": 8}, {"compress": True, "complevel": 9}, ], ) - def test_disconnected_doublenull_with_compression(self, tmp_path, squash_kwargs): + def 
test_disconnected_doublenull_with_compression( + self, disconnected_double_null_full, tmp_path, squash_kwargs + ): """ Check output from a disconnected double-null case using a large number of processes. 'Large' means there is at least one process in each region with no edges touching another region. This test checks some compression options that can be used with `squashoutput()`, verifying that they do not modify data. """ - grid_info = make_grid_info(nxpe=3, nype=18, ixseps1=6, ixseps2=11, xpoints=2) - - fieldperp_global_yind = 19 - fieldperp_yproc_ind = 4 - - rng = np.random.default_rng(111) - - dump_params = [ - # inner, lower divertor leg - (0, ["xinner", "ylower"], -1), - (1, ["ylower"], -1), - (2, ["xouter", "ylower"], -1), - (3, ["xinner"], -1), - (4, [], -1), - (5, ["xouter"], -1), - (6, ["xinner"], -1), - (7, [], -1), - (8, ["xouter"], -1), - # inner core - (9, ["xinner"], -1), - (10, [], -1), - (11, ["xouter"], -1), - (12, ["xinner"], fieldperp_global_yind), - (13, [], fieldperp_global_yind), - (14, ["xouter"], fieldperp_global_yind), - (15, ["xinner"], -1), - (16, [], -1), - (17, ["xouter"], -1), - # inner, upper divertor leg - (18, ["xinner"], -1), - (19, [], -1), - (20, ["xouter"], -1), - (21, ["xinner"], -1), - (22, [], -1), - (23, ["xouter"], -1), - (24, ["xinner", "yupper"], -1), - (25, ["yupper"], -1), - (26, ["xouter", "yupper"], -1), - # outer, upper divertor leg - (27, ["xinner", "ylower"], -1), - (28, ["ylower"], -1), - (29, ["xouter", "ylower"], -1), - (30, ["xinner"], -1), - (31, [], -1), - (32, ["xouter"], -1), - (33, ["xinner"], -1), - (34, [], -1), - (35, ["xouter"], -1), - # outer core - (36, ["xinner"], -1), - (37, [], -1), - (38, ["xouter"], -1), - (39, ["xinner"], -1), - (40, [], -1), - (41, ["xouter"], -1), - (42, ["xinner"], -1), - (43, [], -1), - (44, ["xouter"], -1), - # outer, lower divertor leg - (45, ["xinner"], -1), - (46, [], -1), - (47, ["xouter"], -1), - (48, ["xinner"], -1), - (49, [], -1), - (50, ["xouter"], -1), - (51, ["xinner", "yupper"], -1), - (52, ["yupper"], -1), - (53, ["xouter", "yupper"], -1), - ] - dumps = [] - for i, boundaries, fieldperp_yind in dump_params: - dumps.append( - create_dump_file( - tmpdir=tmp_path, - rng=rng, - grid_info=grid_info, - i=i, - boundaries=boundaries, - fieldperp_global_yind=fieldperp_yind, - ) - ) - - expected = concatenate_data( - dumps, nxpe=grid_info["NXPE"], fieldperp_yproc_ind=fieldperp_yproc_ind - ) + data_path, expected, fieldperp_global_yind = disconnected_double_null_full + symlink_dump_files(data_path, tmp_path) collect_kwargs = {"xguards": True, "yguards": "include_upper"}