From 26649b113e53550d422f57fe3df904afc5bfff2c Mon Sep 17 00:00:00 2001 From: w-bonelli Date: Mon, 8 Apr 2024 15:17:21 -0400 Subject: [PATCH 1/4] refactor(particle tracking output): consolidation/cleanup, support prt files --- autotest/test_plotutil.py | 182 +++++--- flopy/plot/plotutil.py | 697 ++++++++++++++++++------------- flopy/utils/modpathfile.py | 90 ++-- flopy/utils/particletrackfile.py | 16 +- flopy/utils/prtfile.py | 166 ++++++++ 5 files changed, 734 insertions(+), 417 deletions(-) create mode 100644 flopy/utils/prtfile.py diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py index 8aa228f8a..bc5fbb05a 100644 --- a/autotest/test_plotutil.py +++ b/autotest/test_plotutil.py @@ -3,13 +3,17 @@ import pytest from flopy.plot.plotutil import ( - MP7_ENDPOINT_DTYPE, - MP7_PATHLINE_DTYPE, - PRT_PATHLINE_DTYPE, to_mp7_endpoints, to_mp7_pathlines, to_prt_pathlines, ) +from flopy.utils.modpathfile import ( + EndpointFile as MpEndpointFile, +) +from flopy.utils.modpathfile import ( + PathlineFile as MpPathlineFile, +) +from flopy.utils.prtfile import PathlineFile as PrtPathlineFile PRT_TEST_PATHLINES = pd.DataFrame.from_records( [ @@ -194,7 +198,7 @@ "PRP000000001", # name ], ], - columns=PRT_PATHLINE_DTYPE.names, + columns=PrtPathlineFile.dtypes["base"].names, ) MP7_TEST_PATHLINES = pd.DataFrame.from_records( [ @@ -233,7 +237,7 @@ 1, # timestep ], ], - columns=MP7_PATHLINE_DTYPE.names, + columns=MpPathlineFile.dtypes[7].names, ) MP7_TEST_ENDPOINTS = pd.DataFrame.from_records( [ @@ -322,114 +326,152 @@ 2, # cellface ], ], - columns=MP7_ENDPOINT_DTYPE.names, + columns=MpEndpointFile.dtypes[7].names, ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines(dataframe): - prt_pls = ( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - mp7_pls = to_mp7_pathlines(prt_pls) +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_mp7_pathlines(dataframe, source): + if source == "prt": + pls = ( + PRT_TEST_PATHLINES + if dataframe + else PRT_TEST_PATHLINES.to_records(index=False) + ) + elif source == "mp3": + pass + elif source == "mp5": + pass + elif source == "mp7": + pass + mp7_pls = to_mp7_pathlines(pls) assert ( - type(prt_pls) + type(pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert len(mp7_pls) == 10 assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names - ) == set(MP7_PATHLINE_DTYPE.names) + ) == set(MpPathlineFile.dtypes[7].names) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_empty(dataframe): - mp7_pls = to_mp7_pathlines( - pd.DataFrame.from_records([], columns=PRT_PATHLINE_DTYPE.names) - if dataframe - else np.recarray((0,), dtype=PRT_PATHLINE_DTYPE) - ) - assert mp7_pls.empty if dataframe else mp7_pls.size == 0 +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_mp7_pathlines_empty(dataframe, source): + if source == "prt": + pls = to_mp7_pathlines( + pd.DataFrame.from_records( + [], columns=PrtPathlineFile.dtypes["base"].names + ) + if dataframe + else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) + ) + elif source == "mp3": + pass + elif source == "mp5": + pass + elif source == "mp7": + pass + assert pls.empty if dataframe else pls.size == 0 if dataframe: - mp7_pls = mp7_pls.to_records(index=False) - assert mp7_pls.dtype == MP7_PATHLINE_DTYPE + pls = pls.to_records(index=False) + assert pls.dtype == MpPathlineFile.dtypes[7] 
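+
+# Illustrative round-trip sketch of the converters under test (comments only,
+# not executed; assumes the dtypes introduced in this patch): PRT-format
+# records map to the MODPATH 7 layout and back, e.g.
+#
+#   mp7_pls = to_mp7_pathlines(PRT_TEST_PATHLINES)
+#   assert set(mp7_pls.columns) == set(MpPathlineFile.dtypes[7].names)
+#   prt_pls = to_prt_pathlines(mp7_pls)
+#   assert set(prt_pls.columns) == set(PrtPathlineFile.dtypes["base"].names)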
@pytest.mark.parametrize("dataframe", [True, False]) def test_to_mp7_pathlines_noop(dataframe): - prt_pls = ( + pls = ( MP7_TEST_PATHLINES if dataframe else MP7_TEST_PATHLINES.to_records(index=False) ) - mp7_pls = to_mp7_pathlines(prt_pls) + mp7_pls = to_mp7_pathlines(pls) assert ( - type(prt_pls) + type(pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert len(mp7_pls) == 2 assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names - ) == set(MP7_PATHLINE_DTYPE.names) + ) == set(MpPathlineFile.dtypes[7].names) assert np.array_equal( mp7_pls if dataframe else pd.DataFrame(mp7_pls), MP7_TEST_PATHLINES ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints(dataframe): - mp7_eps = to_mp7_endpoints( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - assert len(mp7_eps) == 1 - assert np.isclose(mp7_eps.time[0], PRT_TEST_PATHLINES.t.max()) +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_mp7_endpoints(dataframe, source): + if source == "prt": + eps = to_mp7_endpoints( + PRT_TEST_PATHLINES + if dataframe + else PRT_TEST_PATHLINES.to_records(index=False) + ) + elif source == "mp3": + pass + elif source == "mp5": + pass + elif source == "mp6": + pass + assert len(eps) == 1 + assert np.isclose(eps.time[0], PRT_TEST_PATHLINES.t.max()) assert set( - dict(mp7_eps.dtypes).keys() if dataframe else mp7_eps.dtype.names - ) == set(MP7_ENDPOINT_DTYPE.names) + dict(eps.dtypes).keys() if dataframe else eps.dtype.names + ) == set(MpEndpointFile.dtypes[7].names) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_empty(dataframe): - mp7_eps = to_mp7_endpoints( - pd.DataFrame.from_records([], columns=PRT_PATHLINE_DTYPE.names) +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_mp7_endpoints_empty(dataframe, source): + eps = to_mp7_endpoints( + pd.DataFrame.from_records( + [], columns=PrtPathlineFile.dtypes["base"].names + ) if dataframe - else np.recarray((0,), dtype=PRT_PATHLINE_DTYPE) + else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) ) - assert mp7_eps.empty if dataframe else mp7_eps.size == 0 + assert eps.empty if dataframe else eps.size == 0 if dataframe: - mp7_eps = mp7_eps.to_records(index=False) - assert mp7_eps.dtype == MP7_ENDPOINT_DTYPE + eps = eps.to_records(index=False) + assert eps.dtype == MpEndpointFile.dtypes[7] @pytest.mark.parametrize("dataframe", [True, False]) def test_to_mp7_endpoints_noop(dataframe): """Test a recarray or dataframe which already contains MP7 endpoint data""" - mp7_eps = to_mp7_endpoints( + eps = to_mp7_endpoints( MP7_TEST_ENDPOINTS if dataframe else MP7_TEST_ENDPOINTS.to_records(index=False) ) assert np.array_equal( - mp7_eps if dataframe else pd.DataFrame(mp7_eps), MP7_TEST_ENDPOINTS + eps if dataframe else pd.DataFrame(eps), MP7_TEST_ENDPOINTS ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip(dataframe): - mp7_pls = to_mp7_pathlines( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - prt_pls = to_prt_pathlines(mp7_pls) +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_prt_pathlines_roundtrip(dataframe, source): + if source == "prt": + pls = to_mp7_pathlines( + PRT_TEST_PATHLINES + if dataframe + else PRT_TEST_PATHLINES.to_records(index=False) + ) + elif source == "mp3": + pass + elif source == "mp5": + pass + elif source == "mp6": + pass + 
prt_pls = to_prt_pathlines(pls) if not dataframe: prt_pls = pd.DataFrame(prt_pls) + # import pdb; pdb.set_trace() assert np.allclose( PRT_TEST_PATHLINES.drop( ["imdl", "iprp", "irpt", "name", "istatus", "ireason"], @@ -443,15 +485,25 @@ def test_to_prt_pathlines_roundtrip(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip_empty(dataframe): - mp7_pls = to_mp7_pathlines( - pd.DataFrame.from_records([], columns=PRT_PATHLINE_DTYPE.names) - if dataframe - else np.recarray((0,), dtype=PRT_PATHLINE_DTYPE) - ) - prt_pls = to_prt_pathlines(mp7_pls) - assert mp7_pls.empty if dataframe else mp7_pls.size == 0 - assert prt_pls.empty if dataframe else mp7_pls.size == 0 +@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) +def test_to_prt_pathlines_roundtrip_empty(dataframe, source): + if source == "prt": + pls = to_mp7_pathlines( + pd.DataFrame.from_records( + [], columns=PrtPathlineFile.dtypes["base"].names + ) + if dataframe + else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) + ) + elif source == "mp3": + pass + elif source == "mp5": + pass + elif source == "mp6": + pass + prt_pls = to_prt_pathlines(pls) + assert pls.empty if dataframe else pls.size == 0 + assert prt_pls.empty if dataframe else pls.size == 0 assert set( - dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names - ) == set(MP7_PATHLINE_DTYPE.names) + dict(pls.dtypes).keys() if dataframe else pls.dtype.names + ) == set(MpPathlineFile.dtypes[7].names) diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 57af252a4..320c1ae76 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -7,14 +7,15 @@ import os import warnings -from itertools import repeat -from typing import Union +from pprint import pformat +from typing import List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np import pandas as pd from ..datbase import DataInterface, DataType +from ..discretization.grid import Grid from ..utils import Util3d, import_optional_dependency warnings.simplefilter("ignore", RuntimeWarning) @@ -2600,369 +2601,465 @@ def parse_modpath_selection_options( return tep, istart, xp, yp -PRT_PATHLINE_DTYPE = np.dtype( - [ - ("kper", np.int32), - ("kstp", np.int32), - ("imdl", np.int32), - ("iprp", np.int32), - ("irpt", np.int32), - ("ilay", np.int32), - ("icell", np.int32), - ("izone", np.int32), - ("istatus", np.int32), - ("ireason", np.int32), - ("trelease", np.float32), - ("t", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("name", np.str_), - ] -) -MP7_PATHLINE_DTYPE = np.dtype( - [ - ("particleid", np.int32), # same as sequencenumber - ("particlegroup", np.int32), - ( - "sequencenumber", - np.int32, - ), # mp7 sequencenumber (globally unique auto-generated ID) - ( - "particleidloc", - np.int32, - ), # mp7 particle ID (unique within a group, user-assigned or autogenerated) - ("time", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("stressperiod", np.int32), - ("timestep", np.int32), - ] -) -MP7_ENDPOINT_DTYPE = np.dtype( - [ - ( - "particleid", - np.int32, - ), # mp7 sequencenumber (globally unique auto-generated ID) - ("particlegroup", np.int32), - ( - "particleidloc", - np.int32, - ), # mp7 particle ID (unique within a group, user-assigned or autogenerated) - ("status", np.int32), - ("time0", np.float32), - ("time", np.float32), - ("node0", 
np.int32), - ("k0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ("zone0", np.int32), - ("initialcellface", np.int32), - ("node", np.int32), - ("k", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zone", np.int32), - ("cellface", np.int32), - ] -) - - def to_mp7_pathlines( data: Union[np.recarray, pd.DataFrame], + grid: Optional[Grid] = None, + tdis: Optional[List[Tuple[int, int, float]]] = None, ) -> Union[np.recarray, pd.DataFrame]: """ - Convert MODFLOW 6 PRT pathline data to MODPATH 7 pathline format. + Convert pathline data to MODPATH 7 pathline format. Pathline + input ``data`` is expected in MODPATH 3, 5, 6, 7, or MODFLOW + 6 PRT format. + + Notes + ----- + This function is idempotent. Parameters ---------- data : np.recarray or pd.DataFrame - MODFLOW 6 PRT pathline data + Tabular pathline data, in MODPATH or MODFLOW 6 PRT format. + grid : flopy.discretization.grid.Grid + Optional grid discretization to compute local coordinates, + if not provided in the input data. + tdis : list of tuples (int, int, float) + Optional time discretization to compute stress period and + time step, if not provided in the input data. Returns ------- np.recarray or pd.DataFrame (consistent with input type) """ - from flopy.utils.particletrackfile import MIN_PARTICLE_TRACK_DTYPE - - # determine return type - ret_type = type(data) + from flopy.utils.modpathfile import PathlineFile as MpPathlineFile + from flopy.utils.particletrackfile import ParticleTrackFile + from flopy.utils.prtfile import PathlineFile as PrtPathlineFile - # convert to dataframe if needed - if not isinstance(data, pd.DataFrame): + rt = type(data) # return type will match input type + if data.size == 0: # return early if empty + data = np.recarray((0,), dtype=MpPathlineFile.dtypes[7]) + return pd.DataFrame(data) if rt == pd.DataFrame else data + if not isinstance(data, pd.DataFrame): # convert to dataframe if needed data = pd.DataFrame(data) - # check format - dt = data.dtypes - if not ( - all(n in dt for n in MIN_PARTICLE_TRACK_DTYPE.names) - or all(n in dt for n in PRT_PATHLINE_DTYPE.names) + # do the conversion if we recognize the input dtype, otherwise raise an error + if all( + n in data.dtypes for n in MpPathlineFile.dtypes[7].names + ): # already in MP7 format? 
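+        # already in MODPATH 7 layout: return unchanged, in the same
+        # container type (DataFrame or recarray) as the input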
+ return data if rt == pd.DataFrame else data.to_records(index=False) + elif all(n in data.dtypes for n in MpPathlineFile.dtypes[6].names): + # todo stressperiod/timestep, from tdis + # nper = len(tdis) + data = np.core.records.fromarrays( + [ + data["particleid"], + data["particlegroup"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["time"], + data["x"], + data["y"], + data["z"], + data["k"], + np.zeros(data.shape[0]) + if grid is None + else data["k"] * grid.nrow * grid.ncol + + data["i"] * grid.ncol + + data["j"], + data["xloc"], + data["yloc"], + data["zloc"], + # todo stressperiod/timestep, from tdis + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + ], + dtype=MpPathlineFile.dtypes[7], + ) + elif all(n in data.dtypes for n in MpPathlineFile.dtypes[5].names) or all( + n in data.dtypes for n in MpPathlineFile.dtypes[3].names ): - raise ValueError( - "Pathline data must contain the following fields: " - f"{MIN_PARTICLE_TRACK_DTYPE.names} for MODPATH 7, or " - f"{PRT_PATHLINE_DTYPE.names} for MODFLOW 6 PRT" + # todo stressperiod/timestep, from tdis + # nper = len(tdis) + data = np.core.records.fromarrays( + [ + data["particleid"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["time"], + data["x"], + data["y"], + data["z"], + data["k"], + np.zeros(data.shape[0]) + if grid is None + else data["k"] * grid.nrow * grid.ncol + + data["i"] * grid.ncol + + data["j"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["zloc"], + # todo stressperiod/timestep, from tdis + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + ], + dtype=MpPathlineFile.dtypes[7], ) - - # return early if already in MP7 format - if "t" not in dt: - return ( - data if ret_type == pd.DataFrame else data.to_records(index=False) + elif all(n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names): + # assign a unique particle index column, incrementing an integer + # for each unique combination of irpt, iprp, imdl, and trelease. 
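+        # The synthesized group index fills both the 'particleid' and
+        # 'sequencenumber' fields of the MODPATH 7 record, since PRT output
+        # carries no globally unique particle id of its own.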
+ data = data.sort_values(PrtPathlineFile.key_cols) + data["sequencenumber"] = data.groupby( + PrtPathlineFile.key_cols + ).ngroup() + data = np.core.records.fromarrays( + [ + data["sequencenumber"], + data["iprp"], + data["sequencenumber"], + data["irpt"], + data["t"], + data["x"], + data["y"], + data["z"], + data["ilay"], + data["icell"], + # todo local coords (xloc, yloc, zloc) + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["kper"], + data["kstp"], + ], + dtype=MpPathlineFile.dtypes[7], + ) + elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + data = np.core.records.fromarrays( + [ + data["particleid"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["time"], + data["x"], + data["y"], + data["z"], + data["k"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + ], + dtype=MpPathlineFile.dtypes[7], + ) + else: + raise ValueError( + "Unrecognized dtype, expected:\n" + f"{pformat(MpPathlineFile.dtypes[3].names)} for MODPATH 3, or;\n" + f"{pformat(MpPathlineFile.dtypes[5].names)} for MODPATH 5, or;\n" + f"{pformat(MpPathlineFile.dtypes[6].names)} for MODPATH 6, or;\n" + f"{pformat(MpPathlineFile.dtypes[7].names)} for MODPATH 7, or;\n" + f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" + f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" ) - # return early if empty - if data.empty: - ret = np.recarray((0,), dtype=MP7_PATHLINE_DTYPE) - return pd.DataFrame(ret) if ret_type == pd.DataFrame else ret - - # assign a unique particle index column incrementing an integer - # for each unique combination of irpt, iprp, imdl, and trelease - data = data.sort_values(["imdl", "iprp", "irpt", "trelease"]) - particles = data.groupby(["imdl", "iprp", "irpt", "trelease"]) - seqn_key = "sequencenumber" - data[seqn_key] = particles.ngroup() - - # convert to recarray - data = data.to_records(index=False) - - # build mp7 format recarray - ret = np.core.records.fromarrays( - [ - data[seqn_key], - data["iprp"], - data[seqn_key], - data["irpt"], - data["t"], - data["x"], - data["y"], - data["z"], - data["ilay"], - data["icell"], - # todo local coords (xloc, yloc, zloc) - np.zeros(data.shape[0]), - np.zeros(data.shape[0]), - np.zeros(data.shape[0]), - data["kper"], - data["kstp"], - ], - dtype=MP7_PATHLINE_DTYPE, - ) - - return pd.DataFrame(ret) if ret_type == pd.DataFrame else ret + return pd.DataFrame(data) if rt == pd.DataFrame else data def to_mp7_endpoints( data: Union[np.recarray, pd.DataFrame], + grid: Optional[Grid] = None, + tdis: Optional[List[Tuple[int, int, float]]] = None, ) -> Union[np.recarray, pd.DataFrame]: """ - Convert MODFLOW 6 PRT pathline data to MODPATH 7 endpoint format. + Convert pathline or endpoint data to MODPATH 7 endpoint format. + + Notes + ----- + This function is idempotent. Parameters ---------- data : np.recarray or pd.DataFrame - MODFLOW 6 PRT pathline data + Tabular pathline or endpoint data, in MODPATH or MODFLOW 6 PRT format. + grid : flopy.discretization.grid.Grid + Optional grid discretization to compute local coordinates, + if not provided in the input data. + tdis : list of tuples (int, int, float) + Optional time discretization to compute stress period and + time step, if not provided in the input data. 
Returns ------- np.recarray or pd.DataFrame (consistent with input type) """ - from flopy.utils.particletrackfile import MIN_PARTICLE_TRACK_DTYPE + from flopy.utils.modpathfile import EndpointFile as MpEndpointFile + from flopy.utils.particletrackfile import ParticleTrackFile + from flopy.utils.prtfile import PathlineFile as PrtPathlineFile - # determine return type - ret_type = type(data) - - # convert to dataframe if needed - if isinstance(data, np.recarray): + rt = type(data) # return type will match input type + if data.size == 0: # return early if empty + data = np.recarray((0,), dtype=MpEndpointFile.dtypes[7]) + return pd.DataFrame(data) if rt == pd.DataFrame else data + if not isinstance(data, pd.DataFrame): # convert to dataframe if needed data = pd.DataFrame(data) - # check format - dt = data.dtypes - if all(n in dt for n in MP7_ENDPOINT_DTYPE.names): - return ( - data if ret_type == pd.DataFrame else data.to_records(index=False) + # do the conversion if we recognize the input dtype, otherwise raise an error + if all( + n in data.dtypes for n in MpEndpointFile.dtypes[7].names + ): # already in MP7 format? + return data if rt == pd.DataFrame else data.to_records(index=False) + elif all(n in data.dtypes for n in MpEndpointFile.dtypes[3].names): + raise NotImplementedError("MP3 endpoint files not yet supported") + elif all(n in data.dtypes for n in MpEndpointFile.dtypes[5].names): + raise NotImplementedError("MP5 endpoint files not yet supported") + elif all(n in data.dtypes for n in MpEndpointFile.dtypes[6].names): + raise NotImplementedError("MP6 endpoint files not yet supported") + elif all(n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names): + # assign a unique particle index column incrementing an integer + # for each unique combination of irpt, iprp, imdl, and trelease + data = data.sort_values(["imdl", "iprp", "irpt", "trelease"]) + particles = data.groupby(["imdl", "iprp", "irpt", "trelease"]) + seqn_key = "sequencenumber" + data[seqn_key] = particles.ngroup() + + # select startpoints and endpoints, sorting by sequencenumber + startpts = ( + data.sort_values("t") + .groupby(seqn_key) + .head(1) + .sort_values(seqn_key) ) - if not ( - all(n in dt for n in MIN_PARTICLE_TRACK_DTYPE.names) - or all(n in dt for n in PRT_PATHLINE_DTYPE.names) - ): - raise ValueError( - "Pathline data must contain the following fields: " - f"{MIN_PARTICLE_TRACK_DTYPE.names} for MODPATH 7, or " - f"{PRT_PATHLINE_DTYPE.names} for MODFLOW 6 PRT" + endpts = ( + data.sort_values("t") + .groupby(seqn_key) + .tail(1) + .sort_values(seqn_key) ) - # return early if empty - if data.empty: - ret = np.recarray((0,), dtype=MP7_ENDPOINT_DTYPE) - return pd.DataFrame(ret) if ret_type == pd.DataFrame else ret - - # assign a unique particle index column incrementing an integer - # for each unique combination of irpt, iprp, imdl, and trelease - data = data.sort_values(["imdl", "iprp", "irpt", "trelease"]) - particles = data.groupby(["imdl", "iprp", "irpt", "trelease"]) - seqn_key = "sequencenumber" - data[seqn_key] = particles.ngroup() - - # select startpoints and endpoints, sorting by sequencenumber - startpts = ( - data.sort_values("t").groupby(seqn_key).head(1).sort_values(seqn_key) - ) - endpts = ( - data.sort_values("t").groupby(seqn_key).tail(1).sort_values(seqn_key) - ) + # add columns + pairings = [ + # initial coordinates + ("x0", "x"), + ("y0", "y"), + ("z0", "z"), + # initial zone + ("zone0", "izone"), + # initial node number + ("node0", "icell"), + # initial layer + ("k0", "ilay"), + ] + 
conditions = [ + startpts[seqn_key].eq(row[seqn_key]) + for _, row in startpts.iterrows() + ] + for fl, fr in pairings: + endpts[fl] = np.select(conditions, startpts[fr].to_numpy()) - # add columns for - pairings = [ - # initial coordinates - ("x0", "x"), - ("y0", "y"), - ("z0", "z"), - # initial zone - ("zone0", "izone"), - # initial node number - ("node0", "icell"), - # initial layer - ("k0", "ilay"), - ] - conditions = [ - startpts[seqn_key].eq(row[seqn_key]) for _, row in startpts.iterrows() - ] - for fl, fr in pairings: - endpts[fl] = np.select(conditions, startpts[fr].to_numpy()) - - # convert to recarray - endpts = endpts.to_records(index=False) - - # build mp7 format recarray - ret = np.core.records.fromarrays( - [ - endpts["sequencenumber"], - endpts["iprp"], - endpts["irpt"], - endpts["istatus"], - endpts["trelease"], - endpts["t"], - endpts["node0"], - endpts["k0"], - # todo initial local coords (xloc0, yloc0, zloc0) - np.zeros(endpts.shape[0]), - np.zeros(endpts.shape[0]), - np.zeros(endpts.shape[0]), - endpts["x0"], - endpts["y0"], - endpts["z0"], - endpts["zone0"], - np.zeros(endpts.shape[0]), # todo initial cell face? - endpts["icell"], - endpts["ilay"], - # todo local coords (xloc, yloc, zloc) - np.zeros(endpts.shape[0]), - np.zeros(endpts.shape[0]), - np.zeros(endpts.shape[0]), - endpts["x"], - endpts["y"], - endpts["z"], - endpts["izone"], - np.zeros(endpts.shape[0]), # todo cell face? - ], - dtype=MP7_ENDPOINT_DTYPE, - ) + data = np.core.records.fromarrays( + [ + endpts["sequencenumber"], + endpts["iprp"], + endpts["irpt"], + endpts["istatus"], + endpts["trelease"], + endpts["t"], + endpts["node0"], + endpts["k0"], + # todo initial local coords (xloc0, yloc0, zloc0) + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + endpts["x0"], + endpts["y0"], + endpts["z0"], + endpts["zone0"], + np.zeros(endpts.shape[0]), # todo initial cell face? + endpts["icell"], + endpts["ilay"], + # todo local coords (xloc, yloc, zloc) + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + endpts["x"], + endpts["y"], + endpts["z"], + endpts["izone"], + np.zeros(endpts.shape[0]), # todo cell face? + ], + dtype=MpEndpointFile.dtypes[7], + ) + elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + data = np.core.records.fromarrays( + [ + endpts["particleid"], + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + endpts["time"], + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + # todo initial local coords (xloc0, yloc0, zloc0) + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), # todo initial cell face? 
+ endpts["k"], + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + endpts["x"], + endpts["y"], + endpts["z"], + np.zeros(endpts.shape[0]), + np.zeros(endpts.shape[0]), + ], + dtype=MpEndpointFile.dtypes[7], + ) + else: + raise ValueError( + "Unrecognized dtype, expected:\n" + f"{pformat(MpEndpointFile.dtypes[3].names)} for MODPATH 3, or;\n" + f"{pformat(MpEndpointFile.dtypes[5].names)} for MODPATH 5, or;\n" + f"{pformat(MpEndpointFile.dtypes[6].names)} for MODPATH 6, or;\n" + f"{pformat(MpEndpointFile.dtypes[7].names)} for MODPATH 7, or;\n" + f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" + f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" + ) - return pd.DataFrame(ret) if ret_type == pd.DataFrame else ret + return pd.DataFrame(data) if rt == pd.DataFrame else data def to_prt_pathlines( data: Union[np.recarray, pd.DataFrame], ) -> Union[np.recarray, pd.DataFrame]: """ - Convert MODPATH 7 pathline or endpoint data to MODFLOW 6 PRT pathline format. + Convert MODPATH pathline or endpoint data to MODFLOW 6 PRT pathline format. + + todo: support mp timeseries input Parameters ---------- data : np.recarray or pd.DataFrame - MODPATH 7 pathline or endpoint data + MODPATH pathline or endpoint data Returns ------- np.recarray or pd.DataFrame (consistent with input type) """ - # determine return type - ret_type = type(data) - - # convert to dataframe if needed - if isinstance(data, np.recarray): + from flopy.utils.modpathfile import ( + EndpointFile as MpEndpointFile, + ) + from flopy.utils.modpathfile import ( + PathlineFile as MpPathlineFile, + ) + from flopy.utils.particletrackfile import ParticleTrackFile + from flopy.utils.prtfile import PathlineFile as PrtPathlineFile + + rt = type(data) # return type will match input type + if data.size == 0: # return early if empty + data = np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) + return pd.DataFrame(data) if rt == pd.DataFrame else data + if not isinstance(data, pd.DataFrame): # convert to dataframe if needed data = pd.DataFrame(data) - # check format - dt = data.dtypes - if not ( - all(n in dt for n in MP7_PATHLINE_DTYPE.names) - or all(n in dt for n in PRT_PATHLINE_DTYPE.names) + # do the conversion if we recognize the input dtype, otherwise raise an error + if all(n in data.dtypes for n in MpPathlineFile.dtypes[7].names) or all( + n in data.dtypes for n in MpEndpointFile.dtypes[7].names ): - raise ValueError( - "Pathline data must contain the following fields: " - f"{MP7_PATHLINE_DTYPE.names} for MODPATH 7, or " - f"{PRT_PATHLINE_DTYPE.names} for MODFLOW 6 PRT" + data = np.core.records.fromarrays( + [ + data["stressperiod"], + data["timestep"], + np.zeros(data.shape[0]), + data["particlegroup"], + data["sequencenumber"], + data["k"], + data["node"], + np.zeros(data.shape[0]), # todo izone? + np.zeros(data.shape[0]), # todo istatus? + np.zeros(data.shape[0]), # todo ireason? + np.zeros(data.shape[0]), # todo trelease? 
+ data["time"], + data["x"], + data["y"], + data["z"], + ["" for _ in range(data.shape[0])], + ], + dtype=PrtPathlineFile.dtypes["base"], ) - - # return early if already in PRT format - if "t" in dt: - return ( - data if ret_type == pd.DataFrame else data.to_records(index=False) + elif all(n in data.dtypes for n in MpPathlineFile.dtypes[3].names) or all( + n in data.dtypes for n in MpEndpointFile.dtypes[3].names + ): + raise NotImplementedError( + "MP3 pathline and endpoint files not yet supported" + ) + elif all(n in data.dtypes for n in MpPathlineFile.dtypes[5].names) or all( + n in data.dtypes for n in MpEndpointFile.dtypes[5].names + ): + raise NotImplementedError( + "MP5 pathline and endpoint files not yet supported" + ) + elif all(n in data.dtypes for n in MpPathlineFile.dtypes[6].names) or all( + n in data.dtypes for n in MpEndpointFile.dtypes[6].names + ): + raise NotImplementedError( + "MP6 pathline and endpoint files not yet supported" + ) + elif all( + n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names + ): # already in prt format? + return data if rt == pd.DataFrame else data.to_records(index=False) + elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + data = np.core.records.fromarrays( + [ + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), + data["particleid"], + data["k"], + np.zeros(data.shape[0]), + np.zeros(data.shape[0]), # todo izone? + np.zeros(data.shape[0]), # todo istatus? + np.zeros(data.shape[0]), # todo ireason? + np.zeros(data.shape[0]), # todo trelease? + data["time"], + data["x"], + data["y"], + data["z"], + ], + dtype=PrtPathlineFile.dtypes["base"], + ) + else: + raise ValueError( + "Unrecognized dtype, expected:\n" + f"{pformat(MpPathlineFile.dtypes[7].names)} for MODPATH 7 pathlines, or;\n" + f"{pformat(MpPathlineFile.dtypes[6].names)} for MODPATH 6 pathlines, or;\n" + f"{pformat(MpPathlineFile.dtypes[5].names)} for MODPATH 5 pathlines, or;\n" + f"{pformat(MpPathlineFile.dtypes[3].names)} for MODPATH 3 pathlines, or;\n" + f"{pformat(MpEndpointFile.dtypes[7].names)} for MODPATH 7 endpoints, or;\n" + f"{pformat(MpEndpointFile.dtypes[6].names)} for MODPATH 6 endpoints, or;\n" + f"{pformat(MpEndpointFile.dtypes[5].names)} for MODPATH 5 endpoints, or;\n" + f"{pformat(MpEndpointFile.dtypes[3].names)} for MODPATH 3 endpoints, or;\n" + f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" + f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" ) - # return early if empty - if data.empty: - ret = np.recarray((0,), dtype=PRT_PATHLINE_DTYPE) - return pd.DataFrame(ret) if ret_type == pd.DataFrame else ret - - # convert to recarray - data = data.to_records(index=False) - - # build prt format recarray - ret = np.core.records.fromarrays( - [ - data["stressperiod"], - data["timestep"], - np.zeros(data.shape[0]), - data["particlegroup"], - data["sequencenumber"], - data["k"], - data["node"], - np.zeros(data.shape[0]), # todo izone? - np.zeros(data.shape[0]), # todo istatus? - np.zeros(data.shape[0]), # todo ireason? - np.zeros(data.shape[0]), # todo trelease? 
- data["time"], - data["x"], - data["y"], - data["z"], - np.zeros(data.shape[0], str), - ], - dtype=PRT_PATHLINE_DTYPE, - ) - - if ret_type == pd.DataFrame: - df = pd.DataFrame(ret) - df.name = df.name.astype(pd.StringDtype()) - return df + if rt == pd.DataFrame: + data = pd.DataFrame(data) + if "name" in data.dtypes.keys(): + data.name = data.name.astype(pd.StringDtype()) + return data else: - return ret + return data diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index 813160c5d..4fe026fb5 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -30,6 +30,51 @@ def __init__( self.direction, ) = self.parse(filename, self.output_type) + def intersect( + self, cells, to_recarray + ) -> Union[List[np.recarray], np.recarray]: + if self.version < 7: + try: + raslice = self._data[["k", "i", "j"]] + except (KeyError, ValueError): + raise KeyError( + "could not extract 'k', 'i', and 'j' keys " + "from {} data".format(self.output_type.lower()) + ) + else: + try: + raslice = self._data[["node"]] + except (KeyError, ValueError): + msg = "could not extract 'node' key from {} data".format( + self.output_type.lower() + ) + raise KeyError(msg) + if isinstance(cells, (list, tuple)): + allint = all(isinstance(el, int) for el in cells) + # convert to a list of tuples + if allint: + t = [] + for el in cells: + t.append((el,)) + cells = t + + cells = np.array(cells, dtype=raslice.dtype) + inds = np.in1d(raslice, cells) + epdest = self._data[inds].copy().view(np.recarray) + + if to_recarray: + # use particle ids to get the rest of the paths + inds = np.in1d(self._data["particleid"], epdest.particleid) + series = self._data[inds].copy() + series.sort(order=["particleid", "time"]) + series = series.view(np.recarray) + else: + # collect unique particleids in selection + partids = np.unique(epdest["particleid"]) + series = [self.get_data(partid) for partid in partids] + + return series + @staticmethod def parse( file_path: Union[str, os.PathLike], file_type: str @@ -95,51 +140,6 @@ def parse( return modpath, compact, skiprows, version, direction - def intersect( - self, cells, to_recarray - ) -> Union[List[np.recarray], np.recarray]: - if self.version < 7: - try: - raslice = self._data[["k", "i", "j"]] - except (KeyError, ValueError): - raise KeyError( - "could not extract 'k', 'i', and 'j' keys " - "from {} data".format(self.output_type.lower()) - ) - else: - try: - raslice = self._data[["node"]] - except (KeyError, ValueError): - msg = "could not extract 'node' key from {} data".format( - self.output_type.lower() - ) - raise KeyError(msg) - if isinstance(cells, (list, tuple)): - allint = all(isinstance(el, int) for el in cells) - # convert to a list of tuples - if allint: - t = [] - for el in cells: - t.append((el,)) - cells = t - - cells = np.array(cells, dtype=raslice.dtype) - inds = np.in1d(raslice, cells) - epdest = self._data[inds].copy().view(np.recarray) - - if to_recarray: - # use particle ids to get the rest of the paths - inds = np.in1d(self._data["particleid"], epdest.particleid) - series = self._data[inds].copy() - series.sort(order=["particleid", "time"]) - series = series.view(np.recarray) - else: - # collect unique particleids in selection - partids = np.unique(epdest["particleid"]) - series = [self.get_data(partid) for partid in partids] - - return series - class PathlineFile(ModpathFile): """ diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 5029c5860..bfd481178 100644 --- a/flopy/utils/particletrackfile.py +++ 
b/flopy/utils/particletrackfile.py @@ -40,11 +40,15 @@ class ParticleTrackFile(ABC): """ + dtype = MIN_PARTICLE_TRACK_DTYPE + """ + Minimal data shared by all particle track file formats. + """ + + # legacy, todo: deprecate outdtype = MIN_PARTICLE_TRACK_DTYPE """ - Minimal information shared by all particle track file formats. - Track data are converted to this dtype for internal storage - and for return from (sub-)class methods. + Minimal data shared by all particle track file formats. """ def __init__( @@ -173,10 +177,8 @@ def get_destination_data( Returns ------- - data : np.recarray - Slice of data array (e.g. PathlineFile._data, TimeseriesFile._data) - containing endpoint, pathline, or timeseries data that intersect - (k,i,j) or (node) dest_cells. + np.recarray or list of np.recarray + Pathline components intersecting (k,i,j) or (node) dest_cells. """ diff --git a/flopy/utils/prtfile.py b/flopy/utils/prtfile.py new file mode 100644 index 000000000..2fdabcdc1 --- /dev/null +++ b/flopy/utils/prtfile.py @@ -0,0 +1,166 @@ +""" +Support for MODFLOW 6 PRT output files. +""" + +import os +from pathlib import Path +from typing import Dict, List, Optional, Union + +import numpy as np +import pandas as pd + +from flopy.utils.particletrackfile import ParticleTrackFile + + +class PathlineFile(ParticleTrackFile): + """Provides MODFLOW 6 prt output file support.""" + + key_cols = ["imdl", "iprp", "irpt", "trelease"] + """Columns making up each particle's composite key.""" + + dtypes = { + "base": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("icell", np.int32), + ("izone", np.int32), + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.float32), + ("t", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("name", np.str_), + ] + ), + "full": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("k", np.int32), # conform to canonical dtype + ("icell", np.int32), + ("izone", np.int32), + ("idest", np.int32), # destination zone, for convenience + ("dest", np.str_), # destination name, for convenience + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.int32), + ("t", np.int32), + ("t0", np.int32), # release time, for convenience + ("tt", np.int32), # termination time, convenience + ("time", np.int32), # conform to canonical dtype + ("x", np.int32), + ("y", np.int32), + ("z", np.int32), + ("particleid", np.int32), # conform to canonical dtype + ("name", np.str_), + ] + ), + } + + def __init__( + self, + filename: Union[str, os.PathLike], + header_filename: Optional[Union[str, os.PathLike]] = None, + destination_map: Optional[Dict[int, str]] = None, + verbose: bool = False, + ): + super().__init__(filename, verbose) + self.dtype, self._data = self._load(header_filename, destination_map) + + def _load( + self, + header_filename=None, + destination_map=None, + ) -> np.ndarray: + # load dtype from header file if present, otherwise use default dtype + hdr_fname = ( + None + if header_filename is None + else Path(header_filename).expanduser().absolute() + ) + if hdr_fname is not None and hdr_fname.is_file(): + lines = open(hdr_fname).readlines() + dtype = np.dtype( + { + "names": lines[0].strip().split(","), + "formats": lines[1].strip().split(","), + } + ) + else: + dtype = self.dtypes["base"] + + # read as binary or csv + try: + data = 
pd.read_csv(self.fname, dtype=dtype) + except UnicodeDecodeError: + try: + data = np.fromfile(self.fname, dtype=dtype) + except: + raise ValueError("Unreadable file, expected binary or CSV") + + # add particle id column + data = data.sort_values(self.key_cols) + data["particleid"] = data.groupby(self.key_cols).ngroup() + + # add release time and termination time columns + data["t0"] = ( + data.groupby("particleid") + .apply(lambda x: x.t.min()) + .to_frame(name="t0") + .t0 + ) + data["tt"] = ( + data.groupby("particleid") + .apply(lambda x: x.t.max()) + .to_frame(name="tt") + .tt + ) + + # assign destinations if zone map is provided + if destination_map is not None: + data["idest"] = data[data.istatus > 1].izone + data["dest"] = data.apply( + lambda row: destination_map[row.idest], + axis=1, + ) + + return dtype, data + + def intersect( + self, cells, to_recarray + ) -> Union[List[np.recarray], np.recarray]: + if isinstance(cells, (list, tuple)): + allint = all(isinstance(el, int) for el in cells) + if allint: + t = [] + for el in cells: + t.append((el,)) + cells = t + + icell = self._data[["icell"]] + cells = np.array(cells, dtype=icell.dtype) + inds = np.in1d(icell, cells) + epdest = self._data[inds].copy().view(np.recarray) + if to_recarray: + # use particle ids to get the rest of the paths + inds = np.in1d(self._data["particleid"], epdest.particleid) + series = self._data[inds].copy() + series.sort(order=["particleid", "time"]) + series = series.view(np.recarray) + else: + # collect unique particleids in selection + partids = np.unique(epdest["particleid"]) + series = [self.get_data(partid) for partid in partids] + + return series From 39b54c6a380ee73f05c0b608309eb835e8d11107 Mon Sep 17 00:00:00 2001 From: w-bonelli Date: Wed, 10 Apr 2024 21:31:18 -0400 Subject: [PATCH 2/4] prt files mostly working, minimal tests with example data (can remove and programmatically generate once prt pkg classes are in flopy) --- autotest/test_prtfile.py | 45 +++++ examples/data/mf6/prt_data/001/prt001.trk | Bin 0 -> 21600 bytes examples/data/mf6/prt_data/001/prt001.trk.csv | 181 ++++++++++++++++++ examples/data/mf6/prt_data/001/prt001.trk.hdr | 2 + flopy/utils/prtfile.py | 64 +++---- 5 files changed, 258 insertions(+), 34 deletions(-) create mode 100644 autotest/test_prtfile.py create mode 100644 examples/data/mf6/prt_data/001/prt001.trk create mode 100644 examples/data/mf6/prt_data/001/prt001.trk.csv create mode 100644 examples/data/mf6/prt_data/001/prt001.trk.hdr diff --git a/autotest/test_prtfile.py b/autotest/test_prtfile.py new file mode 100644 index 000000000..39b7850fc --- /dev/null +++ b/autotest/test_prtfile.py @@ -0,0 +1,45 @@ +from pathlib import Path + +import pytest + +from autotest.conftest import get_project_root_path +from flopy.utils.prtfile import PathlineFile + +pytestmark = pytest.mark.mf6 +prt_data_path = ( + get_project_root_path() / "examples" / "data" / "mf6" / "prt_data" / "001" +) + + +@pytest.mark.parametrize( + "path, header_path", + [ + (Path(prt_data_path) / "prt001.trk", None), + ( + Path(prt_data_path) / "prt001.trk", + Path(prt_data_path) / "prt001.trk.hdr", + ), + (Path(prt_data_path) / "prt001.trk.csv", None), + ], +) +def test_init(path, header_path): + file = PathlineFile(path, header_filename=header_path) + assert file.fname == path + assert file.dtype == PathlineFile.dtypes["full"] + if path.suffix == ".csv": + assert len(file._data) == len(open(path).readlines()) - 1 + + +@pytest.mark.parametrize( + "path", + [ + Path(prt_data_path) / "prt001.trk", + 
Path(prt_data_path) / "prt001.trk.csv", + ], +) +def test_intersect(path): + file = PathlineFile(path) + nodes = [1, 11, 21] + intersection = file.intersect(nodes) + assert any(intersection) + assert all(d.icell in nodes for d in intersection.itertuples()) diff --git a/examples/data/mf6/prt_data/001/prt001.trk b/examples/data/mf6/prt_data/001/prt001.trk new file mode 100644 index 0000000000000000000000000000000000000000..593ca093024949adc2187775003b7f2680361a7c GIT binary patch literal 21600 zcmcJX2~-qE9>yEw7)5Y^0XYU88WjXo1Vt|CQV~32Kv_|fh#Cy8LgKMOqk@=-28_3u z1Uvu(@mda1QL>(E6ft-tq6i)V#K>V33=uQ|wu`Rnh0vAv>~8A4p{Kj%^>4m^Rabq~ zRFxJ*QCfr-d$~Oi9{-w>lVHkc+}aZjxcqV^Xj+g9O2ajOlSoW>9VVR5ol(@L#p@qD z@y%hZ3YMx1mY!y~>onkw(zyoQjk<8xBs*k3+EL4t92l{+d2<2cYGmV@5xA`{etBl# zPJyGnR(~{6^CRM_W#jfIaMRz(uARQ$5bXFNS#75(L0k*jxPuAY{deY0^FG-ZXkuJH zw={Z)xGLGWZUk;dcw|G_0b3BEl{t0c*N+faDI0e*fjg*IO7!LT27~zfs_QMgYY|r= z8#jQ!RnPjuGUlzJh*QsK&kB4})z(8c?gRq&k@y7UHKelK@dMVks(+FH^ z({l&ncX}WW1zg8kB^dL3>Fq`^fm?Te-$H|QZ=ibDcEEf-zV%4Abna{d_s3ehtT}35 zq*noY-K(ZI@bybNHR5#@)x?#eFmuaaG`) zOQYih1KfT{=guW??Z-Wh8280E#I*p1B@3;zjB$G@ojZ@f-P+<8ulv3q;;O+U_3yK3 z8{9ug=e|$irdiiMw)z8eHQ-KG{?Eq~aDOPB8%E%s2)J-3VD@;Vmj=lzuB=Rq#Qnc? z?tH}6Wsg13dwA$Bt~Pt`j@|~o7fPJJDrCaj6Aie}bkghkH}*<{8Tcr~?Z_T%o;%ZQ z#093g4W=8Yosy4B@>hMArT6T8_bGGT$2#p~Y#HKeWYbIXS5?*Pbwdg$pc^@9&on>W zP6_%eQE~3i6zQW5 zd%ic$3HmEhTt|Z5^*0I*4IX3xW}eZpbbf{VKl!*L2;42MM=nLX(;#48p;P)>FHxKY zeVr)1z69=DzK4$g=I98*Z#>i3__QLfpsy3fCHbpIXR7l`PdTCTr9iQNy~)XJJ#;R; z-Iz$wt6y?*-Go!FU|L0uLdEka*|;Qsb+s;JMs}D=iAuFFpSG@VUeh^qoj{=h{qUgGCTIyZ#C&03_JVPoJ8 zcDC8a{Bj0AxAJl461ZzW^}N-3-3RHl0K=bKPcCb~?WJ_R^9bCam$e4#qOpHggF|j7 z%!?V^KS<}kPvAzmjx5N`Vvt@9@VVkuws$4&52bTS{_5>ZON-Ze0>q_3z(wvy@|gP;&x=KgU(Ni zkNttkSuvv_@56hjUyzSW@>hwJ!{zEl&zan%hga;)`vq|Y{go)aB!Bfx|LknNy|^E! zXCiDYsn`jE{z??roGY+SWeqR6M0_?-J53A3OkwmMeuLX7`M4SaH)Pe#DO0a1LFfTOp) zlRm99LFdxBBMEUH6FP5t+f6%gsQ(Nj?`=I0PteziiZcpemwMHA{P-i+;|3$10?>gb zN#`c_l1(qVQ5Wu?&vY-94{!!mPu!QK@H|03DN64oic1~aOn7Cu&G$}wp1J@oR|%Gt z957DaisLQ4d?#}^?xHvF^|LGM=evQB=|v@36}Ud(c0@XtiR8Cgn_Cq>%27&w2(bdhbQ}MW{8aO)ohd)lk z?WJ_?Oagb-hmDi^Uhn`74S$+s1mJdAKJNPj?rd+rs?1VP6lV=^H2J9ci{rRIl&+WL zui|#TxTL=i`*s?f9@6xUQv~k+rE^LCs=K&4YVo|II|D2~zBii&8! 
zeWsJ%5pUwJgt#5qs|O~hH|5-7C^KgM^y!aLGcF&O~5+_40%QTj^LZ^sM+TY{f#cIN97E@$c7w+P(eGUJu6JX}$nEr3JA zu3{_xd6LeZLg4x-4_|nY;06Y~SLhhZw-*%p9@4oae|62Mf8d(sBM?Uob{MaXUBtJS z(zzsmb$79I7}XE^Newu@Aal;fjktf1&YeYwv#FM;_sw_RLG|`y@qv8*C+O=$J-;M> z<@9du#a*wkUK(V7>SlAA@BgLiCHbrF;=t!dN>G0y#H92cCRf4+8g` zx~|GIbO_RG0q!*EEYtAumflW!61Yjnj{Oqp?`C{2HkW4S3#{;6hsdx4*0>D;#o-1EO14ms(K{gnp%SbM>j z{a*^{m(C3&a5vpiesGTE>J3dT)#LjE`M4y1RabgW{p|cmq>lz`dPiJ$r*Jy;mOQX;U=f%f}`8tM20Ju{xoz!QKWR*-M=g_xs=@=5X6Xc80qh;4(YK z+5JuYl@PZhyR@fYT>JP{JE)#Xsr>xkhY!=TakV)CowUPCMT`Hr5RPA^ulgXq&vHE8 zO#_%mQPTC2{MFHz%a#W0_!Y;Yz@B`^uP7h`Vu@IKNKLecH&5Uu}zBJYiFf0piKWCHbqm5q&&FIX*N|q_oE9mP)Jx3&el~}XQE0i6-;=Yy&JUV{!{(*dJbS_=* z0Pe2W6ctBYqQp5DWbUoD?MXIlo}-wXOHQF=-Ks=K)QYyhFJ!QKWRxw!uSZK~jMG{rJHy=$a5tc8gS>*_4YtFhFUx7El2aOtS((^(^{;Lcr-dn=aT%DH=2LNejPR`z}w~?OkACy zuM<_iB!Be|nt#P{mB8o4RpWdc`M7N$5xBl+{uP&P0qm9(e^NP0&`*leYfa!b`*HKH!tO42)09~O z?qh;}QWTfuubR>PE6mg0RORo&^QAvWg9v(QH2=y+<=o`zf#WOa>qO~wA#j7x{3|Y7 z1HL}&<}kG%u3ysi4kvIAqxn}p=}|`>@XwE+pA@Cno4{R&=3jkmcx>HEzP%9ib)vW= ze{~VfzdGeLD)=JbJ_`CPQCyP03Pkg-xNy**w_304#1h;;NH5Y#U#pe^?`azCU&zu>xfuh#R$0hlztgIwmxPFl1Qovxz?Qz=9 z@^MN2%E)7#4qQLTaTTEQt2HIx{XnC0>2cQKZrrJ?;iXe4lPS1Q(+{XlcohUBJUulBQ+=A-|3xj)`xbgiT?w_Q`ndGkq6rU)A>j%w0 zsL!_H`+GruB}y;JUv(F^CtF}z=mpCTz86aG=_>zQRP>Yyp=^taitGUQnND%`d=q~q z#O=sFXrkS>n%)13j@+7|GxOm7S121ie-fs?k|rOQ2(5wA~{Zl{Z=l1=txN{3IDc~RBD=*W4JC|Oc;kYbJf8{RgVG;xP zziQ89=ZzSK)u}-Ujn1WW;r}Zzb>aGX}B!BhK zxmO;+{jb_F?7We^)wMGlqH#YgU9Ty3<1Tu8ez!Xc?tfLBx#Us{&lB`@qV$sdm6@5- zBDnt*_qAxy*V3+V3LkIjdixRd7M{Hx4fnr#r+VL40epQD^mU^2IuN+3rsGH8{#Si} zYmWPje|`jgohYs=fqQkV_kOtlmETSW(=@)l5cGATxFmmNn>i*J?ti6di*8)Ow~vDU zN)(smuM!p{FNXVH+1i-YnXka@qoBVM#U=Tx-RqfaaQ~|^58A0T4Rdk-B)vY9{8jc* b^RsaOt1UY{?hNAldqICCN-xP@$;JI27uj@e literal 0 HcmV?d00001 diff --git a/examples/data/mf6/prt_data/001/prt001.trk.csv b/examples/data/mf6/prt_data/001/prt001.trk.csv new file mode 100644 index 000000000..0b846c480 --- /dev/null +++ b/examples/data/mf6/prt_data/001/prt001.trk.csv @@ -0,0 +1,181 @@ +kper,kstp,imdl,iprp,irpt,ilay,icell,izone,istatus,ireason,trelease,t,x,y,z,name +1,1,1,1,1,1,1,0,1,0,.000000000000000,.000000000000000,.1000000000000000,9.100000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,1,0,1,1,.000000000000000,.6345986517174945E-01,.1111111111111111,9.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,11,0,1,1,.000000000000000,.8304308339182425,.1840201091047777,8.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,21,0,1,1,.000000000000000,2.026389948294649,.2675956324493809,7.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,31,0,1,1,.000000000000000,3.704265435673124,.3606040744154423,6.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,41,0,1,1,.000000000000000,5.928937232170627,.4696105785642022,5.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,51,0,1,1,.000000000000000,8.827842277987040,.6123549075346494,4.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,61,0,1,1,.000000000000000,12.68120183411981,.8315419220952132,3.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,71,0,1,1,.000000000000000,15.14939193559672,1.000000000000000,2.499948317631721,.5000000000000000,PRP000000001 +1,1,1,1,1,1,72,0,1,1,.000000000000000,18.21418587905050,1.255970885130385,2.000000000000000,.5000000000000000,PRP000000001 +1,1,1,1,1,1,82,0,1,1,.000000000000000,24.57630394377621,2.000000000000000,1.255876098210262,.5000000000000000,PRP000000001 +1,1,1,1,1,1,83,0,1,1,.000000000000000,27.64079914213009,2.499702722804964,1.000000000000000,.5000000000000000,PRP000000001 
+1,1,1,1,1,1,93,0,1,1,.000000000000000,30.11083496453177,3.000000000000000,.8314848596408819,.5000000000000000,PRP000000001 +1,1,1,1,1,1,94,0,1,1,.000000000000000,33.96453146615585,4.000000000000000,.6122853298161113,.5000000000000000,PRP000000001 +1,1,1,1,1,1,95,0,1,1,.000000000000000,36.86342560027116,5.000000000000000,.4694741172286391,.5000000000000000,PRP000000001 +1,1,1,1,1,1,96,0,1,1,.000000000000000,39.08799504314657,6.000000000000000,.3604128606742599,.5000000000000000,PRP000000001 +1,1,1,1,1,1,97,0,1,1,.000000000000000,40.76579383248019,7.000000000000000,.2674032132676457,.5000000000000000,PRP000000001 +1,1,1,1,1,1,98,0,1,1,.000000000000000,41.96175836497356,8.000000000000000,.1838899700554665,.5000000000000000,PRP000000001 +1,1,1,1,1,1,99,0,1,1,.000000000000000,42.72875484883790,9.000000000000000,.1110317990610085,.5000000000000000,PRP000000001 +1,1,1,1,1,1,100,0,5,3,.000000000000000,42.72875484883790,9.000000000000000,.1110317990610085,.5000000000000000,PRP000000001 +1,1,1,1,2,1,1,0,1,0,.000000000000000,.000000000000000,.2000000000000000,9.199999999999999,.5000000000000000,PRP000000002 +1,1,1,1,2,1,1,0,1,1,.000000000000000,.1344019587597114,.2499999999999998,9.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,11,0,1,1,.000000000000000,.9013729275062045,.4140452454857496,8.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,21,0,1,1,.000000000000000,2.097332041882611,.6020901730111069,7.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,31,0,1,1,.000000000000000,3.775207529261086,.8113591674347448,6.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,41,0,1,1,.000000000000000,5.535959045252451,1.000000000000000,5.187314016456632,.5000000000000000,PRP000000002 +1,1,1,1,2,1,42,0,1,1,.000000000000000,6.037268307514641,1.060833070496150,5.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,52,0,1,1,.000000000000000,9.106420685909395,1.393711253205838,4.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,62,0,1,1,.000000000000000,13.08389057699164,1.889676216739610,3.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,72,0,1,1,.000000000000000,13.86103602973066,2.000000000000000,2.835798404653227,.5000000000000000,PRP000000002 +1,1,1,1,2,1,73,0,1,1,.000000000000000,18.71985648382454,2.835487432134109,2.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,83,0,1,1,.000000000000000,19.49876924392792,3.000000000000000,1.889402280179934,.5000000000000000,PRP000000002 +1,1,1,1,2,1,84,0,1,1,.000000000000000,23.47729073062385,4.000000000000000,1.393288754623582,.5000000000000000,PRP000000002 +1,1,1,1,2,1,85,0,1,1,.000000000000000,26.54694563326774,5.000000000000000,1.060289778364293,.5000000000000000,PRP000000002 +1,1,1,1,2,1,86,0,1,1,.000000000000000,27.04342109091038,5.185449557844453,1.000000000000000,.5000000000000000,PRP000000002 +1,1,1,1,2,1,96,0,1,1,.000000000000000,28.80857582239105,6.000000000000000,.8107730357146236,.5000000000000000,PRP000000002 +1,1,1,1,2,1,97,0,1,1,.000000000000000,30.48637461172467,7.000000000000000,.6015415614616489,.5000000000000000,PRP000000002 +1,1,1,1,2,1,98,0,1,1,.000000000000000,31.68233914421804,8.000000000000000,.4136728888653385,.5000000000000000,PRP000000002 +1,1,1,1,2,1,99,0,1,1,.000000000000000,32.44933562808238,9.000000000000000,.2497735197826673,.5000000000000000,PRP000000002 +1,1,1,1,2,1,100,0,5,3,.000000000000000,32.44933562808238,9.000000000000000,.2497735197826673,.5000000000000000,PRP000000002 
+1,1,1,1,3,1,1,0,1,0,.000000000000000,.000000000000000,.3000000100000000,9.300000010000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,1,0,1,1,.000000000000000,.2148294796940043,.4285714489795920,9.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,11,0,1,1,.000000000000000,.9818004484404974,.7097918832037549,8.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,21,0,1,1,.000000000000000,2.076672298776422,1.000000000000000,7.070796968014615,.5000000000000000,PRP000000003 +1,1,1,1,3,1,22,0,1,1,.000000000000000,2.203617375414431,1.040006660846159,7.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,32,0,1,1,.000000000000000,4.197575775002937,1.441947406746180,6.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,42,0,1,1,.000000000000000,6.651816678925318,1.871780191137267,5.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,52,0,1,1,.000000000000000,7.437614407292212,2.000000000000000,4.721197504307768,.5000000000000000,PRP000000003 +1,1,1,1,3,1,53,0,1,1,.000000000000000,9.908198040138020,2.445425140413097,4.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,63,0,1,1,.000000000000000,12.75938925953464,3.000000000000000,3.286986537810564,.5000000000000000,PRP000000003 +1,1,1,1,3,1,64,0,1,1,.000000000000000,14.07202735612326,3.286925213146775,3.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,74,0,1,1,.000000000000000,16.92425797975483,4.000000000000000,2.445118081984156,.5000000000000000,PRP000000003 +1,1,1,1,3,1,75,0,1,1,.000000000000000,19.39266639551562,4.720393535467641,2.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,85,0,1,1,.000000000000000,20.18090342207207,5.000000000000000,1.871355668916179,.5000000000000000,PRP000000003 +1,1,1,1,3,1,86,0,1,1,.000000000000000,22.63520742498483,6.000000000000000,1.441478618996614,.5000000000000000,PRP000000003 +1,1,1,1,3,1,87,0,1,1,.000000000000000,24.62882950801743,7.000000000000000,1.039525413044098,.5000000000000000,PRP000000003 +1,1,1,1,3,1,88,0,1,1,.000000000000000,24.75426166980228,7.069965645493071,1.000000000000000,.5000000000000000,PRP000000003 +1,1,1,1,3,1,98,0,1,1,.000000000000000,25.85030962625330,8.000000000000000,.7095393993518588,.5000000000000000,PRP000000003 +1,1,1,1,3,1,99,0,1,1,.000000000000000,26.61730611011764,9.000000000000000,.4284161664224617,.5000000000000000,PRP000000003 +1,1,1,1,3,1,100,0,5,3,.000000000000000,26.61730611011764,9.000000000000000,.4284161664224617,.5000000000000000,PRP000000003 +1,1,1,1,4,1,1,0,1,0,.000000000000000,.000000000000000,.4000000100000000,9.400000009999999,.5000000000000000,PRP000000004 +1,1,1,1,4,1,1,0,1,1,.000000000000000,.3076762301867217,.6666666944444444,9.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,11,0,1,1,.000000000000000,.9240708715396757,1.000000000000000,8.158676803349870,.5000000000000000,PRP000000004 +1,1,1,1,4,1,12,0,1,1,.000000000000000,1.165313120694530,1.158689414248265,8.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,22,0,1,1,.000000000000000,2.819821295801787,1.753424270226212,7.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,32,0,1,1,.000000000000000,3.744508416699555,2.000000000000000,6.511022764165868,.5000000000000000,PRP000000004 +1,1,1,1,4,1,33,0,1,1,.000000000000000,5.060556757909990,2.377042167827183,6.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,43,0,1,1,.000000000000000,7.654780984694812,3.000000000000000,5.079379949475653,.5000000000000000,PRP000000004 
+1,1,1,1,4,1,44,0,1,1,.000000000000000,7.922308268687384,3.068752132115875,5.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,54,0,1,1,.000000000000000,11.52558583084959,3.940033608666633,4.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,64,0,1,1,.000000000000000,11.75758115458850,4.000000000000000,3.940025436039324,.5000000000000000,PRP000000004 +1,1,1,1,4,1,65,0,1,1,.000000000000000,15.36106354187710,5.000000000000000,3.068656396580068,.5000000000000000,PRP000000004 +1,1,1,1,4,1,66,0,1,1,.000000000000000,15.62822018489717,5.079271523708384,3.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,76,0,1,1,.000000000000000,18.22255665479527,6.000000000000000,2.377004299951427,.5000000000000000,PRP000000004 +1,1,1,1,4,1,77,0,1,1,.000000000000000,19.53853998567526,6.511101406612429,2.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,87,0,1,1,.000000000000000,20.46286482342002,7.000000000000000,1.753519533203051,.5000000000000000,PRP000000004 +1,1,1,1,4,1,88,0,1,1,.000000000000000,22.11719341120610,8.000000000000000,1.158823369731619,.5000000000000000,PRP000000004 +1,1,1,1,4,1,89,0,1,1,.000000000000000,22.35864609922380,8.158809921188507,1.000000000000000,.5000000000000000,PRP000000004 +1,1,1,1,4,1,99,0,1,1,.000000000000000,22.97494147400282,9.000000000000000,.6667156763397112,.5000000000000000,PRP000000004 +1,1,1,1,4,1,100,0,5,3,.000000000000000,22.97494147400282,9.000000000000000,.6667156763397112,.5000000000000000,PRP000000004 +1,1,1,1,5,1,1,0,1,0,.000000000000000,.000000000000000,.5000000000000000,9.500000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,1,0,1,1,.000000000000000,.4174906163649280,1.000000000000000,9.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,2,0,1,1,.000000000000000,.4174906163649280,1.000000000000000,9.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,12,0,1,1,.000000000000000,1.937707010745450,2.000000000000000,8.000079469058820,.5000000000000000,PRP000000005 +1,1,1,1,5,1,13,0,1,1,.000000000000000,1.937897716737432,2.000125435399617,8.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,23,0,1,1,.000000000000000,4.337324478275523,3.000000000000000,7.000158206878864,.5000000000000000,PRP000000005 +1,1,1,1,5,1,24,0,1,1,.000000000000000,4.337822861659189,3.000207674524930,7.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,34,0,1,1,.000000000000000,7.487339238772278,4.000000000000000,6.000130787901094,.5000000000000000,PRP000000005 +1,1,1,1,5,1,35,0,1,1,.000000000000000,7.487809876540122,4.000149411454049,6.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,45,0,1,1,.000000000000000,11.08573482205339,5.000000000000000,5.000114103070649,.5000000000000000,PRP000000005 +1,1,1,1,5,1,46,0,1,1,.000000000000000,11.08614540071765,5.000114101964002,5.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,56,0,1,1,.000000000000000,14.68403544778846,6.000000000000000,4.000123769334058,.5000000000000000,PRP000000005 +1,1,1,1,5,1,57,0,1,1,.000000000000000,14.68442525571463,6.000108329612469,4.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,67,0,1,1,.000000000000000,17.83379884218993,7.000000000000000,3.000108311931065,.5000000000000000,PRP000000005 +1,1,1,1,5,1,68,0,1,1,.000000000000000,17.83405870643099,7.000082496912824,3.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,78,0,1,1,.000000000000000,20.23326379385776,8.000000000000000,2.000082483570943,.5000000000000000,PRP000000005 
+1,1,1,1,5,1,79,0,1,1,.000000000000000,20.23338920270048,8.000052262118526,2.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,89,0,1,1,.000000000000000,21.75363330539578,9.000000000000000,1.000052267380652,.5000000000000000,PRP000000005 +1,1,1,1,5,1,90,0,1,1,.000000000000000,21.75366478813337,9.000020708747167,1.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,5,1,100,0,5,3,.000000000000000,21.75366478813337,9.000020708747167,1.000000000000000,.5000000000000000,PRP000000005 +1,1,1,1,6,1,1,0,1,0,.000000000000000,.000000000000000,.6000000200000000,9.600000020000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,1,0,1,1,.000000000000000,.3076762000711404,1.000000000000000,9.333333388888887,.5000000000000000,PRP000000006 +1,1,1,1,6,1,2,0,1,1,.000000000000000,.9240710314511431,1.841323406980941,9.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,12,0,1,1,.000000000000000,1.165293789563162,2.000000000000000,8.841336016860446,.5000000000000000,PRP000000006 +1,1,1,1,6,1,13,0,1,1,.000000000000000,2.819801964670423,3.000000000000000,8.246605917483054,.5000000000000000,PRP000000006 +1,1,1,1,6,1,14,0,1,1,.000000000000000,3.744607781601528,3.489037099745424,8.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,24,0,1,1,.000000000000000,5.060505748680252,4.000000000000000,7.623002001638187,.5000000000000000,PRP000000006 +1,1,1,1,6,1,25,0,1,1,.000000000000000,7.654926540429770,4.920685329586415,7.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,35,0,1,1,.000000000000000,7.922234454245331,5.000000000000000,6.931304407347144,.5000000000000000,PRP000000006 +1,1,1,1,6,1,36,0,1,1,.000000000000000,11.52551201640753,6.000000000000000,6.060030743421259,.5000000000000000,PRP000000006 +1,1,1,1,6,1,37,0,1,1,.000000000000000,11.75775770078962,6.060038924907667,6.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,47,0,1,1,.000000000000000,15.36124008807822,6.931400152333666,5.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,57,0,1,1,.000000000000000,15.62817732214757,7.000000000000000,4.920793768273893,.5000000000000000,PRP000000006 +1,1,1,1,6,1,58,0,1,1,.000000000000000,18.22271040318674,7.623039881680662,4.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,68,0,1,1,.000000000000000,19.53854331473988,8.000000000000000,3.488958488764992,.5000000000000000,PRP000000006 +1,1,1,1,6,1,69,0,1,1,.000000000000000,20.46298688791669,8.246510667273593,3.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,79,0,1,1,.000000000000000,22.11731547570277,8.841202071173459,2.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,89,0,1,1,.000000000000000,22.35872948664267,9.000000000000000,1.841215517967525,.5000000000000000,PRP000000006 +1,1,1,1,6,1,90,0,1,1,.000000000000000,22.97504784393619,9.333294402737220,1.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,6,1,100,0,5,3,.000000000000000,22.97504784393619,9.333294402737220,1.000000000000000,.5000000000000000,PRP000000006 +1,1,1,1,7,1,1,0,1,0,.000000000000000,.000000000000000,.6999999900000000,9.699999990000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,1,0,1,1,.000000000000000,.2148294796940044,1.000000000000000,9.571428551020409,.5000000000000000,PRP000000007 +1,1,1,1,7,1,2,0,1,1,.000000000000000,.9818004484404983,2.000000000000000,9.290208116796245,.5000000000000000,PRP000000007 +1,1,1,1,7,1,3,0,1,1,.000000000000000,2.076672298776415,2.929203031985378,9.000000000000000,.5000000000000000,PRP000000007 
+1,1,1,1,7,1,13,0,1,1,.000000000000000,2.203617375414436,3.000000000000000,8.959993339153836,.5000000000000000,PRP000000007 +1,1,1,1,7,1,14,0,1,1,.000000000000000,4.197575775002941,4.000000000000000,8.558052593253812,.5000000000000000,PRP000000007 +1,1,1,1,7,1,15,0,1,1,.000000000000000,6.651816678925319,5.000000000000000,8.128219808862726,.5000000000000000,PRP000000007 +1,1,1,1,7,1,16,0,1,1,.000000000000000,7.437614407292165,5.278802495692215,8.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,26,0,1,1,.000000000000000,9.908198040138014,6.000000000000000,7.554574859586896,.5000000000000000,PRP000000007 +1,1,1,1,7,1,27,0,1,1,.000000000000000,12.75938925953459,6.713013462189426,7.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,37,0,1,1,.000000000000000,14.07202735612326,7.000000000000000,6.713074786853214,.5000000000000000,PRP000000007 +1,1,1,1,7,1,38,0,1,1,.000000000000000,16.92425797975478,7.554881918015834,6.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,48,0,1,1,.000000000000000,19.39266639551562,8.000000000000000,5.279606464532343,.5000000000000000,PRP000000007 +1,1,1,1,7,1,49,0,1,1,.000000000000000,20.18090342207203,8.128644331083812,5.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,59,0,1,1,.000000000000000,22.63520742498477,8.558521381003381,4.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,69,0,1,1,.000000000000000,24.62882950801737,8.960474586955897,3.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,79,0,1,1,.000000000000000,24.75426166980224,9.000000000000000,2.930034354506921,.5000000000000000,PRP000000007 +1,1,1,1,7,1,80,0,1,1,.000000000000000,25.85030962625325,9.290460600648139,2.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,90,0,1,1,.000000000000000,26.61730611011759,9.571583833577536,1.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,7,1,100,0,5,3,.000000000000000,26.61730611011759,9.571583833577536,1.000000000000000,.5000000000000000,PRP000000007 +1,1,1,1,8,1,1,0,1,0,.000000000000000,.000000000000000,.8000000100000000,9.800000010000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,1,0,1,1,.000000000000000,.1344019512308165,1.000000000000000,9.750000015625000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,2,0,1,1,.000000000000000,.9013729199773104,2.000000000000000,9.585954780392077,.5000000000000000,PRP000000008 +1,1,1,1,8,1,3,0,1,1,.000000000000000,2.097332034353719,3.000000000000000,9.397909864619525,.5000000000000000,PRP000000008 +1,1,1,1,8,1,4,0,1,1,.000000000000000,3.775207521732195,4.000000000000000,9.188640883275196,.5000000000000000,PRP000000008 +1,1,1,1,8,1,5,0,1,1,.000000000000000,5.535959564152013,4.812686202004050,9.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,15,0,1,1,.000000000000000,6.037268254292059,5.000000000000000,8.939167000453050,.5000000000000000,PRP000000008 +1,1,1,1,8,1,16,0,1,1,.000000000000000,9.106420632686824,6.000000000000000,8.606288836540338,.5000000000000000,PRP000000008 +1,1,1,1,8,1,17,0,1,1,.000000000000000,13.08389052376908,7.000000000000000,8.110323902719321,.5000000000000000,PRP000000008 +1,1,1,1,8,1,18,0,1,1,.000000000000000,13.86103684171424,7.164201773142255,8.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,28,0,1,1,.000000000000000,18.71985641123550,8.000000000000000,7.164512745583430,.5000000000000000,PRP000000008 +1,1,1,1,8,1,29,0,1,1,.000000000000000,19.49877003654247,8.110597839292595,7.000000000000000,.5000000000000000,PRP000000008 
+1,1,1,1,8,1,39,0,1,1,.000000000000000,23.47729152323840,8.606711335111470,6.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,49,0,1,1,.000000000000000,26.54694642588228,8.939710292575988,5.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,59,0,1,1,.000000000000000,27.04342131175283,9.000000000000000,4.814550660372381,.5000000000000000,PRP000000008 +1,1,1,1,8,1,60,0,1,1,.000000000000000,28.80857656933874,9.189227014975872,4.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,70,0,1,1,.000000000000000,30.48637535867236,9.398458476147445,3.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,80,0,1,1,.000000000000000,31.68233989116573,9.586327136997982,2.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,90,0,1,1,.000000000000000,32.44933637503007,9.750226495833470,1.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,8,1,100,0,5,3,.000000000000000,32.44933637503007,9.750226495833470,1.000000000000000,.5000000000000000,PRP000000008 +1,1,1,1,9,1,1,0,1,0,.000000000000000,.000000000000000,.8999999800000000,9.899999980000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,1,0,1,1,.000000000000000,.6345987855645296E-01,1.000000000000000,9.888888864197531,.5000000000000000,PRP000000009 +1,1,1,1,9,1,2,0,1,1,.000000000000000,.8304308473029468,2.000000000000000,9.815979850001865,.5000000000000000,PRP000000009 +1,1,1,1,9,1,3,0,1,1,.000000000000000,2.026389961679355,3.000000000000000,9.732404308084922,.5000000000000000,PRP000000009 +1,1,1,1,9,1,4,0,1,1,.000000000000000,3.704265449057832,4.000000000000000,9.639395845450316,.5000000000000000,PRP000000009 +1,1,1,1,9,1,5,0,1,1,.000000000000000,5.928937245555336,5.000000000000000,9.530389317077885,.5000000000000000,PRP000000009 +1,1,1,1,9,1,6,0,1,1,.000000000000000,8.827842291371752,6.000000000000000,9.387644956386470,.5000000000000000,PRP000000009 +1,1,1,1,9,1,7,0,1,1,.000000000000000,12.68120184750453,7.000000000000000,9.168457893117671,.5000000000000000,PRP000000009 +1,1,1,1,9,1,8,0,1,1,.000000000000000,15.14938897572776,7.500051133825171,9.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,18,0,1,1,.000000000000000,18.21418594018942,8.000000000000000,8.744028834033367,.5000000000000000,PRP000000009 +1,1,1,1,9,1,19,0,1,1,.000000000000000,24.57630095531389,8.744123620991481,8.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,29,0,1,1,.000000000000000,27.64079917480293,9.000000000000000,7.500296728865826,.5000000000000000,PRP000000009 +1,1,1,1,9,1,30,0,1,1,.000000000000000,30.11083202407352,9.168514955661681,7.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,40,0,1,1,.000000000000000,33.96452852569760,9.387714534177162,6.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,50,0,1,1,.000000000000000,36.86342265981291,9.530525778487238,5.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,60,0,1,1,.000000000000000,39.08799210268831,9.639587059267356,4.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,70,0,1,1,.000000000000000,40.76579089202193,9.732596727334171,3.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,80,0,1,1,.000000000000000,41.96175542451530,9.816109989097118,2.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,90,0,1,1,.000000000000000,42.72875190837964,9.888968176275537,1.000000000000000,.5000000000000000,PRP000000009 +1,1,1,1,9,1,100,0,5,3,.000000000000000,42.72875190837964,9.888968176275537,1.000000000000000,.5000000000000000,PRP000000009 diff --git a/examples/data/mf6/prt_data/001/prt001.trk.hdr 
b/examples/data/mf6/prt_data/001/prt001.trk.hdr new file mode 100644 index 000000000..c5fb8481e --- /dev/null +++ b/examples/data/mf6/prt_data/001/prt001.trk.hdr @@ -0,0 +1,2 @@ +kper,kstp,imdl,iprp,irpt,ilay,icell,izone,istatus,ireason,trelease,t,x,y,z,name + Union[List[np.recarray], np.recarray]: - if isinstance(cells, (list, tuple)): - allint = all(isinstance(el, int) for el in cells) - if allint: - t = [] - for el in cells: - t.append((el,)) - cells = t - - icell = self._data[["icell"]] - cells = np.array(cells, dtype=icell.dtype) - inds = np.in1d(icell, cells) - epdest = self._data[inds].copy().view(np.recarray) + """Find intersection of pathlines with cells.""" + + if not all(isinstance(nn, int) for nn in cells): + raise ValueError("Expected integer cell numbers") + + idxs = np.in1d(self._data[["icell"]], np.array(cells, dtype=np.int32)) + sect = self._data[idxs].copy() + pids = np.unique(sect["particleid"]) if to_recarray: - # use particle ids to get the rest of the paths - inds = np.in1d(self._data["particleid"], epdest.particleid) - series = self._data[inds].copy() - series.sort(order=["particleid", "time"]) - series = series.view(np.recarray) + idxs = np.in1d(sect["particleid"], pids) + return sect[idxs].sort_values(by=["particleid", "time"]) else: - # collect unique particleids in selection - partids = np.unique(epdest["particleid"]) - series = [self.get_data(partid) for partid in partids] - - return series + return [self.get_data(pid) for pid in pids] From e19eec212cddb4de77945f772d97e03089d1dd15 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 22 May 2024 19:29:45 -0400 Subject: [PATCH 3/4] dtype and dtypes properties, validate method, prep tests for models instead of hardcoded data, todo: generate and check in prt components --- autotest/test_mp6.py | 4 +- autotest/test_plotutil.py | 759 ++++++++++++++----------------- autotest/test_prtfile.py | 41 +- flopy/plot/plotutil.py | 12 +- flopy/utils/modpathfile.py | 384 ++++++++-------- flopy/utils/particletrackfile.py | 32 +- flopy/utils/prtfile.py | 99 ++-- 7 files changed, 625 insertions(+), 706 deletions(-) diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py index 73935a1d4..11e8b9034 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -308,9 +308,9 @@ def test_loadtxt(function_tmpdir, mp6_test_path): pthfile = function_tmpdir / "EXAMPLE-3.pathline" pthld = PathlineFile(pthfile) - ra = loadtxt(pthfile, delimiter=" ", skiprows=3, dtype=pthld.dtype) + ra = loadtxt(pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype) ra2 = loadtxt( - pthfile, delimiter=" ", skiprows=3, dtype=pthld.dtype, use_pandas=False + pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype, use_pandas=False ) assert np.array_equal(ra, ra2) diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py index bc5fbb05a..9ed040c54 100644 --- a/autotest/test_plotutil.py +++ b/autotest/test_plotutil.py @@ -1,483 +1,393 @@ +from pathlib import Path + import numpy as np import pandas as pd import pytest +import flopy from flopy.plot.plotutil import ( to_mp7_endpoints, to_mp7_pathlines, to_prt_pathlines, ) -from flopy.utils.modpathfile import ( - EndpointFile as MpEndpointFile, -) -from flopy.utils.modpathfile import ( - PathlineFile as MpPathlineFile, -) +from flopy.utils.modpathfile import EndpointFile as MpEndpointFile +from flopy.utils.modpathfile import PathlineFile as MpPathlineFile from flopy.utils.prtfile import PathlineFile as PrtPathlineFile -PRT_TEST_PATHLINES = pd.DataFrame.from_records( - [ - [ - 1, - 1, - 1, - 1, - 1, - 1, - 1, 
- 0, - 0, - 0, - 0.0, - 0.000000, - 0.100000, - 9.1, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 1, - 1, - 0.0, - 0.063460, - 0.111111, - 9.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 11, - 0, - 1, - 1, - 0.0, - 0.830431, - 0.184020, - 8.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 21, - 0, - 1, - 1, - 0.0, - 2.026390, - 0.267596, - 7.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 31, - 0, - 1, - 1, - 0.0, - 3.704265, - 0.360604, - 6.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 60, - 0, - 1, - 1, - 0.0, - 39.087992, - 9.639587, - 4.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 70, - 0, - 1, - 1, - 0.0, - 40.765791, - 9.732597, - 3.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 80, - 0, - 1, - 1, - 0.0, - 41.961755, - 9.816110, - 2.0, - 0.5, - "PRP000000001", - ], - [ - 1, - 1, - 1, - 1, - 1, - 1, - 90, - 0, - 1, - 1, - 0.0, - 42.728752, - 9.888968, - 1.0, - 0.5, - "PRP000000001", - ], - [ - 1, # kper - 1, # kstp - 1, # imdl - 1, # iprp - 1, # irpt - 1, # ilay - 100, # icell - 0, # izone - 5, # istatus - 3, # ireason - 0.0, # trelease - 42.728752, # t - 9.888968, # x - 1.0, # y - 0.5, # z - "PRP000000001", # name - ], - ], - columns=PrtPathlineFile.dtypes["base"].names, -) -MP7_TEST_PATHLINES = pd.DataFrame.from_records( - [ - [ - 1, # particleid - 1, # particlegroup - 1, # sequencenumber - 1, # particleidloc - 0.0, # time - 1.0, # x - 2.0, # y - 3.0, # z - 1, # k - 1, # node - 0.1, # xloc - 0.1, # yloc - 0.1, # zloc - 1, # stressperiod - 1, # timestep - ], - [ - 1, - 1, - 1, - 1, - 1.0, # time - 2.0, # x - 3.0, # y - 4.0, # z - 2, # k - 2, # node - 0.9, # xloc - 0.9, # yloc - 0.9, # zloc - 1, # stressperiod - 1, # timestep - ], - ], - columns=MpPathlineFile.dtypes[7].names, -) -MP7_TEST_ENDPOINTS = pd.DataFrame.from_records( - [ - [ - 1, # particleid - 1, # particlegroup - 1, # particleidloc - 2, # status (terminated at boundary face) - 0.0, # time0 - 1.0, # time - 1, # node0 - 1, # k0 - 0.1, # xloc0 - 0.1, # yloc0 - 0.1, # zloc0 - 0.0, # x0 - 1.0, # y0 - 2.0, # z0 - 1, # zone0 - 1, # initialcellface - 5, # node - 2, # k - 0.9, # xloc - 0.9, # yloc - 0.9, # zloc - 10.0, # x - 11.0, # y - 12.0, # z - 2, # zone - 2, # cellface + +nlay = 1 +nrow = 10 +ncol = 10 +top = 1.0 +botm = [0.0] +nper = 1 +perlen = 1.0 +nstp = 1 +tsmult = 1.0 +porosity = 0.1 + + +def get_partdata(grid, rpts): + """ + Make a flopy.modpath.ParticleData from the given grid and release points. 
+ """ + + if grid.grid_type == "structured": + return flopy.modpath.ParticleData( + partlocs=[grid.get_lrc(p[0])[0] for p in rpts], + structured=True, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + else: + return flopy.modpath.ParticleData( + partlocs=[p[0] for p in rpts], + structured=False, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + + +@pytest.fixture +def gwf_sim(function_tmpdir): + gwf_ws = function_tmpdir / "gwf" + gwf_name = "plotutil_gwf" + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=gwf_name, + exe_name="mf6", + version="mf6", + sim_ws=gwf_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + time_units="DAYS", + nper=nper, + perioddata=[ + (perlen, nstp, tsmult) ], - [ - 2, # particleid - 1, # particlegroup - 2, # particleidloc - 2, # status (terminated at boundary face) - 0.0, # time0 - 2.0, # time - 1, # node0 - 1, # k0 - 0.1, # xloc0 - 0.1, # yloc0 - 0.1, # zloc0 - 0.0, # x0 - 1.0, # y0 - 2.0, # z0 - 1, # zone0 - 1, # initialcellface - 5, # node - 2, # k - 0.9, # xloc - 0.9, # yloc - 0.9, # zloc - 10.0, # x - 11.0, # y - 12.0, # z - 2, # zone - 2, # cellface + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf(sim, modelname=gwf_name, save_flows=True) + + # create gwf discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + gwf, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create gwf initial conditions package + flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic") + + # create gwf node property flow package + flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( + gwf, + pname="npf", + save_saturation=True, + save_specific_discharge=True, + ) + + # create gwf chd package + spd = { + 0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]], + 1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]], + } + chd = flopy.mf6.ModflowGwfchd( + gwf, + pname="CHD-1", + stress_period_data=spd, + auxiliary=["concentration"], + ) + + # create gwf output control package + # output file names + gwf_budget_file = f"{gwf_name}.bud" + gwf_head_file = f"{gwf_name}.hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=gwf_budget_file, + head_filerecord=gwf_head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + # create iterative model solution for gwf model + ims = flopy.mf6.ModflowIms(sim) + + return sim + + +@pytest.fixture +def mp7_sim(gwf_sim, function_tmpdir): + gwf = gwf_sim.get_model() + mp7_ws = function_tmpdir / "mp7" + releasepts_mp7 = [ + # node number, localx, localy, localz + (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5) + for i in range(9) + ] + + partdata = get_partdata(gwf.modelgrid, releasepts_mp7) + mp7_name = "plotutil_mp7" + pg = flopy.modpath.ParticleGroup( + particlegroupname="G1", + particledata=partdata, + filename=f"{mp7_name}.sloc", + ) + mp = flopy.modpath.Modpath7( + modelname=mp7_name, + flowmodel=gwf, + exe_name="mp7", + model_ws=mp7_ws, + headfilename=f"{gwf.name}.hds", + budgetfilename=f"{gwf.name}.bud", + ) + mpbas = flopy.modpath.Modpath7Bas( + mp, + porosity=porosity, + ) + mpsim = flopy.modpath.Modpath7Sim( + mp, + simulationtype="pathline", + trackingdirection="forward", + budgetoutputoption="summary", + stoptimeoption="extend", + particlegroups=[pg], + ) + + return mp + + +@pytest.fixture +def prt_sim(function_tmpdir): + gwf_ws = function_tmpdir / "gwf" + prt_ws = function_tmpdir / "prt" + prt_name = 
"plotutil_prt" + gwf_name = "plotutil_gwf" + releasepts_prt = [ + # particle index, k, i, j, x, y, z + [i, 0, 0, 0, float(f"0.{i + 1}"), float(f"9.{i + 1}"), 0.5] + for i in range(9) + ] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=prt_name, + exe_name="mf6", + version="mf6", + sim_ws=prt_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + time_units="DAYS", + nper=nper, + perioddata=[ + ( + perlen, + nstp, + tsmult, + ) ], - [ - 3, # particleid - 1, # particlegroup - 3, # particleidloc - 2, # status (terminated at boundary face) - 0.0, # time0 - 3.0, # time - 1, # node0 - 1, # k0 - 0.1, # xloc0 - 0.1, # yloc0 - 0.1, # zloc0 - 0.0, # x0 - 1.0, # y0 - 2.0, # z0 - 1, # zone0 - 1, # initialcellface - 5, # node - 2, # k - 0.9, # xloc - 0.9, # yloc - 0.9, # zloc - 10.0, # x - 11.0, # y - 12.0, # z - 2, # zone - 2, # cellface + ) + + # create prt model + prt = flopy.mf6.ModflowPrt(sim, modelname=prt_name, save_flows=True) + + # create prt discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + prt, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create mip package + flopy.mf6.ModflowPrtmip( + prt, pname="mip", porosity=porosity + ) + + # create prp package + prp_track_file = f"{prt_name}.prp.trk" + prp_track_csv_file = f"{prt_name}.prp.trk.csv" + flopy.mf6.ModflowPrtprp( + prt, + pname="prp1", + filename=f"{prt_name}_1.prp", + nreleasepts=len(releasepts_prt), + packagedata=releasepts_prt, + perioddata={0: ["FIRST"]}, + track_filerecord=[prp_track_file], + trackcsv_filerecord=[prp_track_csv_file], + stop_at_weak_sink="saws" in prt_name, + boundnames=True, + exit_solve_tolerance=1e-5, + ) + + # create output control package + prt_budget_file = f"{prt_name}.bud" + prt_track_file = f"{prt_name}.trk" + prt_track_csv_file = f"{prt_name}.trk.csv" + flopy.mf6.ModflowPrtoc( + prt, + pname="oc", + budget_filerecord=[prt_budget_file], + track_filerecord=[prt_track_file], + trackcsv_filerecord=[prt_track_csv_file], + saverecord=[("BUDGET", "ALL")], + ) + + # create the flow model interface + gwf_budget_file = gwf_ws / f"{gwf_name}.bud" + gwf_head_file = gwf_ws / f"{gwf_name}.hds" + flopy.mf6.ModflowPrtfmi( + prt, + packagedata=[ + ("GWFHEAD", gwf_head_file), + ("GWFBUDGET", gwf_budget_file), ], - ], - columns=MpEndpointFile.dtypes[7].names, -) + ) + + # add explicit model solution + ems = flopy.mf6.ModflowEms( + sim, + pname="ems", + filename=f"{prt_name}.ems", + ) + sim.register_solution_package(ems, [prt.name]) + + return sim @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_mp7_pathlines(dataframe, source): - if source == "prt": - pls = ( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - elif source == "mp3": - pass - elif source == "mp5": - pass - elif source == "mp7": - pass - mp7_pls = to_mp7_pathlines(pls) +def test_to_mp7_pathlines(prt_sim, dataframe): + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") + mp7_pls = to_mp7_pathlines(prt_pls) + assert ( - type(pls) + type(prt_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) - assert len(mp7_pls) == 10 assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names ) == set(MpPathlineFile.dtypes[7].names) @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_mp7_pathlines_empty(dataframe, source): - if 
source == "prt": - pls = to_mp7_pathlines( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtypes["base"].names - ) - if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) +def test_to_mp7_pathlines_empty(dataframe): + prt_pls = ( + pd.DataFrame.from_records( + [], columns=PrtPathlineFile.dtype.names ) - elif source == "mp3": - pass - elif source == "mp5": - pass - elif source == "mp7": - pass - assert pls.empty if dataframe else pls.size == 0 + if dataframe + else np.recarray((0,), dtype=PrtPathlineFile.dtype) + ) + mp7_pls = to_mp7_pathlines(prt_pls) + + assert prt_pls.empty if dataframe else prt_pls.size == 0 if dataframe: - pls = pls.to_records(index=False) - assert pls.dtype == MpPathlineFile.dtypes[7] + mp7_pls = mp7_pls.to_records(index=False) + assert mp7_pls.dtype == MpPathlineFile.dtypes[7] @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_noop(dataframe): - pls = ( - MP7_TEST_PATHLINES - if dataframe - else MP7_TEST_PATHLINES.to_records(index=False) +def test_to_mp7_pathlines_noop(mp7_sim, dataframe): + plf = flopy.utils.PathlineFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mppth") + og_pls = pd.DataFrame( + plf.get_destination_pathline_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) ) - mp7_pls = to_mp7_pathlines(pls) + mp7_pls = to_mp7_pathlines(og_pls) + assert ( - type(pls) + type(mp7_pls) == type(mp7_pls) == (pd.DataFrame if dataframe else np.recarray) ) - assert len(mp7_pls) == 2 assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names ) == set(MpPathlineFile.dtypes[7].names) assert np.array_equal( - mp7_pls if dataframe else pd.DataFrame(mp7_pls), MP7_TEST_PATHLINES + mp7_pls if dataframe else pd.DataFrame(mp7_pls), og_pls ) @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_mp7_endpoints(dataframe, source): - if source == "prt": - eps = to_mp7_endpoints( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - elif source == "mp3": - pass - elif source == "mp5": - pass - elif source == "mp6": - pass - assert len(eps) == 1 - assert np.isclose(eps.time[0], PRT_TEST_PATHLINES.t.max()) +def test_to_mp7_endpoints(prt_sim, dataframe): + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") + prt_eps = prt_pls[prt_pls.ireason == 3] + mp7_eps = to_mp7_endpoints(mp7_eps) + + assert np.isclose(mp7_eps.time[0], prt_eps.t.max()) assert set( - dict(eps.dtypes).keys() if dataframe else eps.dtype.names + dict(mp7_eps.dtypes).keys() if dataframe else mp7_eps.dtype.names ) == set(MpEndpointFile.dtypes[7].names) @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_mp7_endpoints_empty(dataframe, source): - eps = to_mp7_endpoints( +def test_to_mp7_endpoints_empty(dataframe): + mp7_eps = to_mp7_endpoints( pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtypes["base"].names + [], columns=PrtPathlineFile.dtype.names ) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) + else np.recarray((0,), dtype=PrtPathlineFile.dtype) ) - assert eps.empty if dataframe else eps.size == 0 + + assert mp7_eps.empty if dataframe else mp7_eps.size == 0 if dataframe: - eps = eps.to_records(index=False) - assert eps.dtype == MpEndpointFile.dtypes[7] + mp7_eps = mp7_eps.to_records(index=False) + assert mp7_eps.dtype == MpEndpointFile.dtypes[7] 
@pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_noop(dataframe): +def test_to_mp7_endpoints_noop(mp7_sim, dataframe): """Test a recarray or dataframe which already contains MP7 endpoint data""" - eps = to_mp7_endpoints( - MP7_TEST_ENDPOINTS - if dataframe - else MP7_TEST_ENDPOINTS.to_records(index=False) + epf = flopy.utils.EndpointFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mpend") + og_eps = pd.DataFrame( + epf.get_destination_endpoint_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) ) + mp7_eps = to_mp7_endpoints(og_eps) + assert np.array_equal( - eps if dataframe else pd.DataFrame(eps), MP7_TEST_ENDPOINTS + mp7_eps if dataframe else pd.DataFrame(mp7_eps), og_eps ) @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_prt_pathlines_roundtrip(dataframe, source): - if source == "prt": - pls = to_mp7_pathlines( - PRT_TEST_PATHLINES - if dataframe - else PRT_TEST_PATHLINES.to_records(index=False) - ) - elif source == "mp3": - pass - elif source == "mp5": - pass - elif source == "mp6": - pass - prt_pls = to_prt_pathlines(pls) +def test_to_prt_pathlines_roundtrip(prt_sim, dataframe): + og_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") + mp7_pls = to_mp7_pathlines( + og_pls + if dataframe + else og_pls.to_records(index=False) + ) + prt_pls = to_prt_pathlines(mp7_pls) + if not dataframe: prt_pls = pd.DataFrame(prt_pls) - # import pdb; pdb.set_trace() assert np.allclose( - PRT_TEST_PATHLINES.drop( + prt_pls.drop( ["imdl", "iprp", "irpt", "name", "istatus", "ireason"], axis=1, ), - prt_pls.drop( + og_pls.drop( ["imdl", "iprp", "irpt", "name", "istatus", "ireason"], axis=1, ), @@ -485,25 +395,18 @@ def test_to_prt_pathlines_roundtrip(dataframe, source): @pytest.mark.parametrize("dataframe", [True, False]) -@pytest.mark.parametrize("source", ["prt"]) # , "mp3", "mp5", "mp6"]) -def test_to_prt_pathlines_roundtrip_empty(dataframe, source): - if source == "prt": - pls = to_mp7_pathlines( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtypes["base"].names - ) - if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtypes["base"]) +def test_to_prt_pathlines_roundtrip_empty(dataframe): + og_pls = to_mp7_pathlines( + pd.DataFrame.from_records( + [], columns=PrtPathlineFile.dtype.names ) - elif source == "mp3": - pass - elif source == "mp5": - pass - elif source == "mp6": - pass - prt_pls = to_prt_pathlines(pls) - assert pls.empty if dataframe else pls.size == 0 - assert prt_pls.empty if dataframe else pls.size == 0 + if dataframe + else np.recarray((0,), dtype=PrtPathlineFile.dtype) + ) + prt_pls = to_prt_pathlines(og_pls) + + assert og_pls.empty if dataframe else og_pls.size == 0 + assert prt_pls.empty if dataframe else og_pls.size == 0 assert set( - dict(pls.dtypes).keys() if dataframe else pls.dtype.names + dict(og_pls.dtypes).keys() if dataframe else og_pls.dtype.names ) == set(MpPathlineFile.dtypes[7].names) diff --git a/autotest/test_prtfile.py b/autotest/test_prtfile.py index 39b7850fc..61c5dfda2 100644 --- a/autotest/test_prtfile.py +++ b/autotest/test_prtfile.py @@ -1,45 +1,60 @@ -from pathlib import Path - import pytest from autotest.conftest import get_project_root_path from flopy.utils.prtfile import PathlineFile pytestmark = pytest.mark.mf6 +proj_root = get_project_root_path() prt_data_path = ( - get_project_root_path() / "examples" / "data" / "mf6" / "prt_data" / "001" + proj_root / "examples" / "data" / "mf6" / "prt_data" / "001" ) 
@pytest.mark.parametrize( "path, header_path", [ - (Path(prt_data_path) / "prt001.trk", None), ( - Path(prt_data_path) / "prt001.trk", - Path(prt_data_path) / "prt001.trk.hdr", + prt_data_path / "prt001.trk", + prt_data_path / "prt001.trk.hdr", ), - (Path(prt_data_path) / "prt001.trk.csv", None), + (prt_data_path / "prt001.trk.csv", None), ], ) def test_init(path, header_path): file = PathlineFile(path, header_filename=header_path) assert file.fname == path - assert file.dtype == PathlineFile.dtypes["full"] if path.suffix == ".csv": assert len(file._data) == len(open(path).readlines()) - 1 @pytest.mark.parametrize( - "path", + "path, header_path", [ - Path(prt_data_path) / "prt001.trk", - Path(prt_data_path) / "prt001.trk.csv", + ( + prt_data_path / "prt001.trk", + prt_data_path / "prt001.trk.hdr", + ), + (prt_data_path / "prt001.trk.csv", None), ], ) -def test_intersect(path): - file = PathlineFile(path) +def test_intersect(path, header_path): + file = PathlineFile(path, header_filename=header_path) nodes = [1, 11, 21] intersection = file.intersect(nodes) assert any(intersection) assert all(d.icell in nodes for d in intersection.itertuples()) + + +@pytest.mark.parametrize( + "path, header_path", + [ + ( + prt_data_path / "prt001.trk", + prt_data_path / "prt001.trk.hdr", + ), + (prt_data_path / "prt001.trk.csv", None), + ], +) +def test_validate(path, header_path): + file = PathlineFile(path, header_filename=header_path) + file.validate() diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 320c1ae76..fba2dc062 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -2733,7 +2733,7 @@ def to_mp7_pathlines( ], dtype=MpPathlineFile.dtypes[7], ) - elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): data = np.core.records.fromarrays( [ data["particleid"], @@ -2762,7 +2762,7 @@ def to_mp7_pathlines( f"{pformat(MpPathlineFile.dtypes[6].names)} for MODPATH 6, or;\n" f"{pformat(MpPathlineFile.dtypes[7].names)} for MODPATH 7, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" ) return pd.DataFrame(data) if rt == pd.DataFrame else data @@ -2893,7 +2893,7 @@ def to_mp7_endpoints( ], dtype=MpEndpointFile.dtypes[7], ) - elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): data = np.core.records.fromarrays( [ endpts["particleid"], @@ -2933,7 +2933,7 @@ def to_mp7_endpoints( f"{pformat(MpEndpointFile.dtypes[6].names)} for MODPATH 6, or;\n" f"{pformat(MpEndpointFile.dtypes[7].names)} for MODPATH 7, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" ) return pd.DataFrame(data) if rt == pd.DataFrame else data @@ -3020,7 +3020,7 @@ def to_prt_pathlines( n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names ): # already in prt format? 
return data if rt == pd.DataFrame else data.to_records(index=False) - elif all(n in data.dtypes for n in ParticleTrackFile.dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): data = np.core.records.fromarrays( [ np.zeros(data.shape[0]), @@ -3053,7 +3053,7 @@ def to_prt_pathlines( f"{pformat(MpEndpointFile.dtypes[5].names)} for MODPATH 5 endpoints, or;\n" f"{pformat(MpEndpointFile.dtypes[3].names)} for MODPATH 3 endpoints, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile.dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" ) if rt == pd.DataFrame: diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index 4fe026fb5..a226d89f3 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -4,6 +4,7 @@ import itertools import os +from abc import ABC, abstractmethod from typing import List, Optional, Tuple, Union import numpy as np @@ -14,9 +15,14 @@ from ..utils.flopy_io import loadtxt -class ModpathFile(ParticleTrackFile): +class ModpathFile(ParticleTrackFile, ABC): """Provides MODPATH output file support.""" + @property + @abstractmethod + def dtypes(self): + pass + def __init__( self, filename: Union[str, os.PathLike], verbose: bool = False ): @@ -161,64 +167,70 @@ class PathlineFile(ModpathFile): """ - dtypes = { - **dict.fromkeys( - [3, 5], - np.dtype( + @property + def dtypes(self): + return { + **dict.fromkeys( + [3, 5], + np.dtype( + [ + ("particleid", np.int32), + ("x", np.float32), + ("y", np.float32), + ("zloc", np.float32), + ("z", np.float32), + ("time", np.float32), + ("j", np.int32), + ("i", np.int32), + ("k", np.int32), + ("cumulativetimestep", np.int32), + ] + ), + ), + 6: np.dtype( [ ("particleid", np.int32), + ("particlegroup", np.int32), + ("timepointindex", np.int32), + ("cumulativetimestep", np.int32), + ("time", np.float32), ("x", np.float32), ("y", np.float32), - ("zloc", np.float32), ("z", np.float32), - ("time", np.float32), - ("j", np.int32), + ("k", np.int32), ("i", np.int32), + ("j", np.int32), + ("grid", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("linesegmentindex", np.int32), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("sequencenumber", np.int32), + ("particleidloc", np.int32), + ("time", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), ("k", np.int32), - ("cumulativetimestep", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("stressperiod", np.int32), + ("timestep", np.int32), ] ), - ), - 6: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("timepointindex", np.int32), - ("cumulativetimestep", np.int32), - ("time", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("grid", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("linesegmentindex", np.int32), - ] - ), - 7: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("sequencenumber", np.int32), - ("particleidloc", np.int32), - ("time", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("stressperiod", np.int32), - ("timestep", np.int32), - 
] - ), - } + } + + @property + def dtype(self): + return self._dtype kijnames = [ "k", @@ -236,7 +248,7 @@ def __init__( self, filename: Union[str, os.PathLike], verbose: bool = False ): super().__init__(filename, verbose=verbose) - self.dtype, self._data = self._load() + self._dtype, self._data = self._load() self.nid = np.unique(self._data["particleid"]) def _load(self) -> Tuple[np.dtype, np.ndarray]: @@ -447,98 +459,104 @@ class EndpointFile(ModpathFile): """ - dtypes = { - **dict.fromkeys( - [3, 5], - np.dtype( + @property + def dtypes(self): + return { + **dict.fromkeys( + [3, 5], + np.dtype( + [ + ("zone", np.int32), + ("j", np.int32), + ("i", np.int32), + ("k", np.int32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("zloc", np.float32), + ("time", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("zloc0", np.float32), + ("j0", np.int32), + ("i0", np.int32), + ("k0", np.int32), + ("zone0", np.int32), + ("cumulativetimestep", np.int32), + ("ipcode", np.int32), + ("time0", np.float32), + ] + ), + ), + 6: np.dtype( [ - ("zone", np.int32), - ("j", np.int32), - ("i", np.int32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("status", np.int32), + ("time0", np.float32), + ("time", np.float32), + ("initialgrid", np.int32), + ("k0", np.int32), + ("i0", np.int32), + ("j0", np.int32), + ("cellface0", np.int32), + ("zone0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("z0", np.float32), + ("finalgrid", np.int32), ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("cellface", np.int32), + ("zone", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), ("x", np.float32), ("y", np.float32), ("z", np.float32), - ("zloc", np.float32), + ("label", "|S40"), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("status", np.int32), + ("time0", np.float32), ("time", np.float32), + ("node0", np.int32), + ("k0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), ("x0", np.float32), ("y0", np.float32), - ("zloc0", np.float32), - ("j0", np.int32), - ("i0", np.int32), - ("k0", np.int32), + ("z0", np.float32), ("zone0", np.int32), - ("cumulativetimestep", np.int32), - ("ipcode", np.int32), - ("time0", np.float32), + ("initialcellface", np.int32), + ("node", np.int32), + ("k", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("zone", np.int32), + ("cellface", np.int32), ] ), - ), - 6: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("status", np.int32), - ("time0", np.float32), - ("time", np.float32), - ("initialgrid", np.int32), - ("k0", np.int32), - ("i0", np.int32), - ("j0", np.int32), - ("cellface0", np.int32), - ("zone0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ("finalgrid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("cellface", np.int32), - ("zone", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("label", "|S40"), - ] - ), - 7: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("status", np.int32), - ("time0", 
np.float32), - ("time", np.float32), - ("node0", np.int32), - ("k0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ("zone0", np.int32), - ("initialcellface", np.int32), - ("node", np.int32), - ("k", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zone", np.int32), - ("cellface", np.int32), - ] - ), - } + } + + @property + def dtype(self): + return self._dtype kijnames = [ "k0", @@ -560,7 +578,7 @@ def __init__( self, filename: Union[str, os.PathLike], verbose: bool = False ): super().__init__(filename, verbose) - self.dtype, self._data = self._load() + self._dtype, self._data = self._load() self.nid = np.unique(self._data["particleid"]) def _load(self) -> Tuple[np.dtype, np.ndarray]: @@ -799,61 +817,67 @@ class TimeseriesFile(ModpathFile): >>> ts1 = tsobj.get_data(partid=1) """ - dtypes = { - **dict.fromkeys( - [3, 5], - np.dtype( + @property + def dtypes(self): + return { + **dict.fromkeys( + [3, 5], + np.dtype( + [ + ("timestepindex", np.int32), + ("particleid", np.int32), + ("node", np.int32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("zloc", np.float32), + ("time", np.float32), + ("timestep", np.int32), + ] + ), + ), + 6: np.dtype( [ - ("timestepindex", np.int32), + ("timepointindex", np.int32), + ("timestep", np.int32), + ("time", np.float32), ("particleid", np.int32), - ("node", np.int32), + ("particlegroup", np.int32), ("x", np.float32), ("y", np.float32), ("z", np.float32), + ("grid", np.int32), + ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), ("zloc", np.float32), - ("time", np.float32), + ] + ), + 7: np.dtype( + [ + ("timepointindex", np.int32), ("timestep", np.int32), + ("time", np.float32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", np.int32), ] ), - ), - 6: np.dtype( - [ - ("timepointindex", np.int32), - ("timestep", np.int32), - ("time", np.float32), - ("particleid", np.int32), - ("particlegroup", np.int32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("grid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ] - ), - 7: np.dtype( - [ - ("timepointindex", np.int32), - ("timestep", np.int32), - ("time", np.float32), - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), - ] - ), - } + } + + @property + def dtype(self): + return self._dtype kijnames = [ "k", @@ -870,7 +894,7 @@ class TimeseriesFile(ModpathFile): def __init__(self, filename, verbose=False): super().__init__(filename, verbose) - self.dtype, self._data = self._load() + self._dtype, self._data = self._load() self.nid = np.unique(self._data["particleid"]) def _load(self) -> Tuple[np.dtype, np.ndarray]: diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index bfd481178..186267fb7 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -4,11 +4,13 
@@ import os from abc import ABC, abstractmethod +from collections import OrderedDict from pathlib import Path from typing import Union import numpy as np from numpy.lib.recfunctions import stack_arrays +import pandas as pd MIN_PARTICLE_TRACK_DTYPE = np.dtype( [ @@ -27,10 +29,6 @@ class ParticleTrackFile(ABC): Abstract base class for particle track output files. Exposes a unified API supporting MODPATH versions 3, 5, 6 and 7, as well as MODFLOW 6 PRT models. - Notes - ----- - - Parameters ---------- filename : str or PathLike @@ -40,17 +38,20 @@ class ParticleTrackFile(ABC): """ - dtype = MIN_PARTICLE_TRACK_DTYPE - """ - Minimal data shared by all particle track file formats. - """ - # legacy, todo: deprecate outdtype = MIN_PARTICLE_TRACK_DTYPE """ Minimal data shared by all particle track file formats. """ + @property + @abstractmethod + def dtype(self): + """ + Particle track file data dtype. + """ + return MIN_PARTICLE_TRACK_DTYPE + def __init__( self, filename: Union[str, os.PathLike], @@ -334,3 +335,16 @@ def write_shapefile( # write the final recarray to a shapefile recarray2shp(sdata, geoms, shpname=shpname, crs=crs, **kwargs) + + def validate(self): + dtype = self.dtype + expected = OrderedDict(MIN_PARTICLE_TRACK_DTYPE.fields.items()) + if isinstance(dtype, dict): + for dt in dtype.values(): + self.validate(dt) + elif isinstance(dtype, pd.Series): + subset = OrderedDict({k: v for k, v in dtype.to_dict().items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) + assert subset == expected + elif isinstance(dtype, np.dtypes.VoidDType): + subset = OrderedDict({k: v for k, v in dtype.fields.items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) + assert subset == expected \ No newline at end of file diff --git a/flopy/utils/prtfile.py b/flopy/utils/prtfile.py index 6df241974..219ef6d3b 100644 --- a/flopy/utils/prtfile.py +++ b/flopy/utils/prtfile.py @@ -18,61 +18,18 @@ class PathlineFile(ParticleTrackFile): key_cols = ["imdl", "iprp", "irpt", "trelease"] """Columns making up each particle's composite key.""" - dtypes = { - "base": np.dtype( - [ - ("kper", np.int32), - ("kstp", np.int32), - ("imdl", np.int32), - ("iprp", np.int32), - ("irpt", np.int32), - ("ilay", np.int32), - ("icell", np.int32), - ("izone", np.int32), - ("istatus", np.int32), - ("ireason", np.int32), - ("trelease", np.float32), - ("t", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("name", np.str_), - ] - ), - "full": np.dtype( - [ - ("kper", np.int32), - ("kstp", np.int32), - ("imdl", np.int32), - ("iprp", np.int32), - ("irpt", np.int32), - ("ilay", np.int32), - ("k", np.int32), # conform to canonical dtype - ("icell", np.int32), - ("izone", np.int32), - ("idest", np.int32), # destination zone, for convenience - ("dest", np.str_), # destination name, for convenience - ("istatus", np.int32), - ("ireason", np.int32), - ("trelease", np.float32), - ("t", np.float32), - ("t0", np.float32), # release time, for convenience - ("tt", np.float32), # termination time, convenience - ("time", np.float32), # conform to canonical dtype - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("particleid", np.int32), # conform to canonical dtype - ("name", np.str_), - ] - ), - } - """ - Base (directly from MODFLOW 6 PRT) and full (extended by this class) dtypes for PRT. - The extended dtype complies with the canonical `flopy.utils.particletrackfile.dtype` - and provides a few other columns for convenience, including release and termination - time and destination zone number/name. 
- """ + @property + def dtype(self): + """ + PRT track file dtype. This is loaded dynamically from the binary header file or CSV file + headers. A best-effort attempt is made to add extra columns to comply with the canonical + `flopy.utils.particletrackfile.dtype`, as well as convenience columns, including release + and termination time and destination zone number and name. + + Consumers of this class which expect the canonical particle track file attributes should + call `validate()` to make sure the attributes were successfully loaded. + """ + return self._dtype def __init__( self, @@ -82,14 +39,14 @@ def __init__( verbose: bool = False, ): super().__init__(filename, verbose) - self.dtype, self._data = self._load(header_filename, destination_map) + self._dtype, self._data = self._load(header_filename, destination_map) def _load( self, header_filename=None, destination_map=None, ) -> np.ndarray: - # load dtype from header file if present, otherwise use default dtype + # if a header file is present, we're reading a binary file hdr_fname = ( None if header_filename is None @@ -103,17 +60,11 @@ def _load( "formats": lines[1].strip().split(","), } ) + data = pd.DataFrame(np.fromfile(self.fname, dtype=dtype)) else: - dtype = self.dtypes["base"] - - # read as binary or csv - try: - data = pd.read_csv(self.fname, dtype=dtype) - except UnicodeDecodeError: - try: - data = pd.DataFrame(np.fromfile(self.fname, dtype=dtype)) - except: - raise ValueError("Unreadable file, expected binary or CSV") + # otherwise we're reading a CSV file + data = pd.read_csv(self.fname) + dtype = data.to_records(index=False).dtype # add particle id column data = data.sort_values(self.key_cols) @@ -134,6 +85,9 @@ def _load( .tt ) + # add k column + data["k"] = data["ilay"] + # assign destinations if zone map is provided if destination_map is not None: data["idest"] = data[data.istatus > 1].izone @@ -142,7 +96,7 @@ def _load( axis=1, ) - return self.dtypes["full"], data + return data.to_records(index=False).dtype, data def intersect( self, cells, to_recarray=True @@ -160,3 +114,12 @@ def intersect( return sect[idxs].sort_values(by=["particleid", "time"]) else: return [self.get_data(pid) for pid in pids] + + @staticmethod + def get_track_dtype(path: Union[str, os.PathLike]): + """Read a numpy dtype describing particle track + data format from the ascii track header file.""" + + hdr_lns = open(path).readlines() + hdr_lns_spl = [[ll.strip() for ll in l.split(",")] for l in hdr_lns] + return np.dtype(list(zip(hdr_lns_spl[0], hdr_lns_spl[1]))) From e8531e3db02524f7118ab8d505684afaba7549c6 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Thu, 23 May 2024 19:45:31 -0400 Subject: [PATCH 4/4] rebase with prt components, remove mock data and use real models for tests --- autotest/test_mp6.py | 6 +- autotest/test_plotutil.py | 145 ++++++++---- autotest/test_prtfile.py | 343 ++++++++++++++++++++++++----- flopy/plot/plotutil.py | 12 +- flopy/utils/modpathfile.py | 365 +++++++++++++++---------------- flopy/utils/particletrackfile.py | 35 ++- flopy/utils/prtfile.py | 57 ++++- 7 files changed, 656 insertions(+), 307 deletions(-) diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py index 11e8b9034..d04ac68c1 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -310,7 +310,11 @@ def test_loadtxt(function_tmpdir, mp6_test_path): pthld = PathlineFile(pthfile) ra = loadtxt(pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype) ra2 = loadtxt( - pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype, use_pandas=False + pthfile, + 
delimiter=" ", + skiprows=3, + dtype=pthld._dtype, + use_pandas=False, ) assert np.array_equal(ra, ra2) diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py index 9ed040c54..de4f05daa 100644 --- a/autotest/test_plotutil.py +++ b/autotest/test_plotutil.py @@ -14,7 +14,6 @@ from flopy.utils.modpathfile import PathlineFile as MpPathlineFile from flopy.utils.prtfile import PathlineFile as PrtPathlineFile - nlay = 1 nrow = 10 ncol = 10 @@ -73,9 +72,7 @@ def gwf_sim(function_tmpdir): pname="tdis", time_units="DAYS", nper=nper, - perioddata=[ - (perlen, nstp, tsmult) - ], + perioddata=[(perlen, nstp, tsmult)], ) # create gwf model @@ -131,9 +128,10 @@ def gwf_sim(function_tmpdir): @pytest.fixture -def mp7_sim(gwf_sim, function_tmpdir): +def mp7_sim(gwf_sim): gwf = gwf_sim.get_model() - mp7_ws = function_tmpdir / "mp7" + ws = gwf_sim.sim_path.parent + mp7_ws = ws / "mp7" releasepts_mp7 = [ # node number, localx, localy, localz (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5) @@ -172,9 +170,10 @@ def mp7_sim(gwf_sim, function_tmpdir): @pytest.fixture -def prt_sim(function_tmpdir): - gwf_ws = function_tmpdir / "gwf" - prt_ws = function_tmpdir / "prt" +def prt_sim(gwf_sim): + ws = gwf_sim.sim_path.parent + gwf_ws = ws / "gwf" + prt_ws = ws / "prt" prt_name = "plotutil_prt" gwf_name = "plotutil_gwf" releasepts_prt = [ @@ -219,9 +218,7 @@ def prt_sim(function_tmpdir): ) # create mip package - flopy.mf6.ModflowPrtmip( - prt, pname="mip", porosity=porosity - ) + flopy.mf6.ModflowPrtmip(prt, pname="mip", porosity=porosity) # create prp package prp_track_file = f"{prt_name}.prp.trk" @@ -276,8 +273,16 @@ def prt_sim(function_tmpdir): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines(prt_sim, dataframe): +def test_to_mp7_pathlines(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") + if not dataframe: + prt_pls = prt_pls.to_records(index=False) mp7_pls = to_mp7_pathlines(prt_pls) assert ( @@ -291,13 +296,18 @@ def test_to_mp7_pathlines(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_empty(dataframe): +def test_to_mp7_pathlines_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") prt_pls = ( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=prt_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=prt_pls.to_records(index=False).dtype) ) mp7_pls = to_mp7_pathlines(prt_pls) @@ -308,32 +318,50 @@ def test_to_mp7_pathlines_empty(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_noop(mp7_sim, dataframe): - plf = flopy.utils.PathlineFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mppth") - og_pls = pd.DataFrame( - plf.get_destination_pathline_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) +def test_to_mp7_pathlines_noop(gwf_sim, mp7_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + mp7_sim.write_input() + mp7_sim.run_model() + + plf = flopy.utils.PathlineFile( + Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mppth" + ) + og_pls = plf.get_destination_pathline_data( + 
range(gwf_sim.get_model().modelgrid.nnodes), to_recarray=True ) + if dataframe: + og_pls = pd.DataFrame(og_pls) mp7_pls = to_mp7_pathlines(og_pls) assert ( type(mp7_pls) - == type(mp7_pls) + == type(og_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names ) == set(MpPathlineFile.dtypes[7].names) assert np.array_equal( - mp7_pls if dataframe else pd.DataFrame(mp7_pls), og_pls + pd.DataFrame(mp7_pls) if dataframe else mp7_pls, og_pls ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints(prt_sim, dataframe): +def test_to_mp7_endpoints(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") prt_eps = prt_pls[prt_pls.ireason == 3] - mp7_eps = to_mp7_endpoints(mp7_eps) - + if not dataframe: + prt_eps = prt_eps.to_records(index=False) + mp7_eps = to_mp7_endpoints(prt_eps) + assert np.isclose(mp7_eps.time[0], prt_eps.t.max()) assert set( dict(mp7_eps.dtypes).keys() if dataframe else mp7_eps.dtype.names @@ -341,13 +369,18 @@ def test_to_mp7_endpoints(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_empty(dataframe): +def test_to_mp7_endpoints_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") mp7_eps = to_mp7_endpoints( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=prt_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=prt_pls.to_records(index=False).dtype) ) assert mp7_eps.empty if dataframe else mp7_eps.size == 0 @@ -357,26 +390,41 @@ def test_to_mp7_endpoints_empty(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_noop(mp7_sim, dataframe): +def test_to_mp7_endpoints_noop(gwf_sim, mp7_sim, dataframe): """Test a recarray or dataframe which already contains MP7 endpoint data""" - epf = flopy.utils.EndpointFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mpend") - og_eps = pd.DataFrame( - epf.get_destination_endpoint_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) + + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + mp7_sim.write_input() + mp7_sim.run_model() + + epf = flopy.utils.EndpointFile( + Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mpend" ) + og_eps = epf.get_destination_endpoint_data( + range(gwf_sim.get_model().modelgrid.nnodes) + ) + if dataframe: + og_eps = pd.DataFrame(og_eps) mp7_eps = to_mp7_endpoints(og_eps) assert np.array_equal( - mp7_eps if dataframe else pd.DataFrame(mp7_eps), og_eps + pd.DataFrame(mp7_eps) if dataframe else mp7_eps, og_eps ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip(prt_sim, dataframe): +def test_to_prt_pathlines_roundtrip(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + og_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") mp7_pls = to_mp7_pathlines( - og_pls - if dataframe - else og_pls.to_records(index=False) + og_pls if dataframe else og_pls.to_records(index=False) ) prt_pls = to_prt_pathlines(mp7_pls) @@ -395,13 +443,18 @@ def 
test_to_prt_pathlines_roundtrip(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip_empty(dataframe): +def test_to_prt_pathlines_roundtrip_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + og_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") og_pls = to_mp7_pathlines( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=og_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=og_pls.to_records(index=False).dtype) ) prt_pls = to_prt_pathlines(og_pls) diff --git a/autotest/test_prtfile.py b/autotest/test_prtfile.py index 61c5dfda2..d15aab21d 100644 --- a/autotest/test_prtfile.py +++ b/autotest/test_prtfile.py @@ -1,60 +1,297 @@ import pytest +import flopy from autotest.conftest import get_project_root_path from flopy.utils.prtfile import PathlineFile pytestmark = pytest.mark.mf6 proj_root = get_project_root_path() -prt_data_path = ( - proj_root / "examples" / "data" / "mf6" / "prt_data" / "001" -) - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_init(path, header_path): - file = PathlineFile(path, header_filename=header_path) - assert file.fname == path - if path.suffix == ".csv": - assert len(file._data) == len(open(path).readlines()) - 1 - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_intersect(path, header_path): - file = PathlineFile(path, header_filename=header_path) - nodes = [1, 11, 21] - intersection = file.intersect(nodes) - assert any(intersection) - assert all(d.icell in nodes for d in intersection.itertuples()) - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_validate(path, header_path): - file = PathlineFile(path, header_filename=header_path) - file.validate() + +nlay = 1 +nrow = 10 +ncol = 10 +top = 1.0 +botm = [0.0] +nper = 1 +perlen = 1.0 +nstp = 1 +tsmult = 1.0 +porosity = 0.1 + + +def get_partdata(grid, rpts): + """ + Make a flopy.modpath.ParticleData from the given grid and release points. 
+ """ + + if grid.grid_type == "structured": + return flopy.modpath.ParticleData( + partlocs=[grid.get_lrc(p[0])[0] for p in rpts], + structured=True, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + else: + return flopy.modpath.ParticleData( + partlocs=[p[0] for p in rpts], + structured=False, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + + +@pytest.fixture +def gwf_sim(function_tmpdir): + gwf_ws = function_tmpdir / "gwf" + gwf_name = "plotutil_gwf" + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=gwf_name, + exe_name="mf6", + version="mf6", + sim_ws=gwf_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + time_units="DAYS", + nper=nper, + perioddata=[(perlen, nstp, tsmult)], + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf(sim, modelname=gwf_name, save_flows=True) + + # create gwf discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + gwf, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create gwf initial conditions package + flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic") + + # create gwf node property flow package + flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( + gwf, + pname="npf", + save_saturation=True, + save_specific_discharge=True, + ) + + # create gwf chd package + spd = { + 0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]], + 1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]], + } + chd = flopy.mf6.ModflowGwfchd( + gwf, + pname="CHD-1", + stress_period_data=spd, + auxiliary=["concentration"], + ) + + # create gwf output control package + # output file names + gwf_budget_file = f"{gwf_name}.bud" + gwf_head_file = f"{gwf_name}.hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=gwf_budget_file, + head_filerecord=gwf_head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + # create iterative model solution for gwf model + ims = flopy.mf6.ModflowIms(sim) + + return sim + + +@pytest.fixture +def mp7_sim(gwf_sim): + gwf = gwf_sim.get_model() + ws = gwf_sim.sim_path.parent + mp7_ws = ws / "mp7" + releasepts_mp7 = [ + # node number, localx, localy, localz + (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5) + for i in range(9) + ] + + partdata = get_partdata(gwf.modelgrid, releasepts_mp7) + mp7_name = "plotutil_mp7" + pg = flopy.modpath.ParticleGroup( + particlegroupname="G1", + particledata=partdata, + filename=f"{mp7_name}.sloc", + ) + mp = flopy.modpath.Modpath7( + modelname=mp7_name, + flowmodel=gwf, + exe_name="mp7", + model_ws=mp7_ws, + headfilename=f"{gwf.name}.hds", + budgetfilename=f"{gwf.name}.bud", + ) + mpbas = flopy.modpath.Modpath7Bas( + mp, + porosity=porosity, + ) + mpsim = flopy.modpath.Modpath7Sim( + mp, + simulationtype="pathline", + trackingdirection="forward", + budgetoutputoption="summary", + stoptimeoption="extend", + particlegroups=[pg], + ) + + return mp + + +@pytest.fixture +def prt_sim(gwf_sim): + ws = gwf_sim.sim_path.parent + gwf_ws = ws / "gwf" + prt_ws = ws / "prt" + prt_name = "plotutil_prt" + gwf_name = "plotutil_gwf" + releasepts_prt = [ + # particle index, k, i, j, x, y, z + [i, 0, 0, 0, float(f"0.{i + 1}"), float(f"9.{i + 1}"), 0.5] + for i in range(9) + ] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=prt_name, + exe_name="mf6", + version="mf6", + sim_ws=prt_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + 
time_units="DAYS", + nper=nper, + perioddata=[ + ( + perlen, + nstp, + tsmult, + ) + ], + ) + + # create prt model + prt = flopy.mf6.ModflowPrt(sim, modelname=prt_name, save_flows=True) + + # create prt discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + prt, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create mip package + flopy.mf6.ModflowPrtmip(prt, pname="mip", porosity=porosity) + + # create prp package + prp_track_file = f"{prt_name}.prp.trk" + prp_track_csv_file = f"{prt_name}.prp.trk.csv" + flopy.mf6.ModflowPrtprp( + prt, + pname="prp1", + filename=f"{prt_name}_1.prp", + nreleasepts=len(releasepts_prt), + packagedata=releasepts_prt, + perioddata={0: ["FIRST"]}, + track_filerecord=[prp_track_file], + trackcsv_filerecord=[prp_track_csv_file], + stop_at_weak_sink="saws" in prt_name, + boundnames=True, + exit_solve_tolerance=1e-5, + ) + + # create output control package + prt_budget_file = f"{prt_name}.bud" + prt_track_file = f"{prt_name}.trk" + prt_track_csv_file = f"{prt_name}.trk.csv" + flopy.mf6.ModflowPrtoc( + prt, + pname="oc", + budget_filerecord=[prt_budget_file], + track_filerecord=[prt_track_file], + trackcsv_filerecord=[prt_track_csv_file], + saverecord=[("BUDGET", "ALL")], + ) + + # create the flow model interface + gwf_budget_file = gwf_ws / f"{gwf_name}.bud" + gwf_head_file = gwf_ws / f"{gwf_name}.hds" + flopy.mf6.ModflowPrtfmi( + prt, + packagedata=[ + ("GWFHEAD", gwf_head_file), + ("GWFBUDGET", gwf_budget_file), + ], + ) + + # add explicit model solution + ems = flopy.mf6.ModflowEms( + sim, + pname="ems", + filename=f"{prt_name}.ems", + ) + sim.register_solution_package(ems, [prt.name]) + + return sim + + +# todo: finish tests + +# @pytest.mark.parametrize("csv", [False, True]) +# def test_init(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# assert file.fname == path +# if path.suffix == ".csv": +# assert len(file._data) == len(open(path).readlines()) - 1 +# +# +# @pytest.mark.parametrize("csv", [False, True]) +# def test_intersect(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# nodes = [1, 11, 21] +# intersection = file.intersect(nodes) +# assert any(intersection) +# assert all(d.icell in nodes for d in intersection.itertuples()) +# +# +# @pytest.mark.parametrize("csv", [False, True]) +# def test_validate(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# file.validate() +# diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index fba2dc062..c4e6533e1 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -2733,7 +2733,7 @@ def to_mp7_pathlines( ], dtype=MpPathlineFile.dtypes[7], ) - elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile.dtypes["base"].names): data = np.core.records.fromarrays( [ data["particleid"], @@ -2762,7 +2762,7 @@ def to_mp7_pathlines( f"{pformat(MpPathlineFile.dtypes[6].names)} for MODPATH 6, or;\n" f"{pformat(MpPathlineFile.dtypes[7].names)} for MODPATH 7, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)" ) return pd.DataFrame(data) if rt == pd.DataFrame else data @@ -2893,7 +2893,7 @@ def to_mp7_endpoints( ], dtype=MpEndpointFile.dtypes[7], ) - elif all(n in data.dtypes for n in 
ParticleTrackFile._dtype.names):
+    elif all(n in data.dtypes for n in ParticleTrackFile.dtypes["base"].names):
         data = np.core.records.fromarrays(
             [
                 endpts["particleid"],
@@ -2933,7 +2933,7 @@ def to_mp7_endpoints(
         f"{pformat(MpEndpointFile.dtypes[6].names)} for MODPATH 6, or;\n"
         f"{pformat(MpEndpointFile.dtypes[7].names)} for MODPATH 7, or;\n"
         f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n"
-        f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)"
+        f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)"
     )
 
     return pd.DataFrame(data) if rt == pd.DataFrame else data
@@ -3020,7 +3020,7 @@ def to_prt_pathlines(
             n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names
         ):  # already in prt format?
             return data if rt == pd.DataFrame else data.to_records(index=False)
-    elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names):
+    elif all(n in data.dtypes for n in ParticleTrackFile.dtypes["base"].names):
         data = np.core.records.fromarrays(
             [
                 np.zeros(data.shape[0]),
@@ -3053,7 +3053,7 @@ def to_prt_pathlines(
         f"{pformat(MpEndpointFile.dtypes[5].names)} for MODPATH 5 endpoints, or;\n"
         f"{pformat(MpEndpointFile.dtypes[3].names)} for MODPATH 3 endpoints, or;\n"
         f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n"
-        f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)"
+        f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)"
     )
 
     if rt == pd.DataFrame:
diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py
index a226d89f3..3f3585fbc 100644
--- a/flopy/utils/modpathfile.py
+++ b/flopy/utils/modpathfile.py
@@ -18,11 +18,6 @@ class ModpathFile(ParticleTrackFile, ABC):
 
     """Provides MODPATH output file support."""
 
-    @property
-    @abstractmethod
-    def dtypes(self):
-        pass
-
     def __init__(
         self, filename: Union[str, os.PathLike], verbose: bool = False
     ):
@@ -167,67 +162,65 @@ class PathlineFile(ModpathFile):
 
     """
 
-    @property
-    def dtypes(self):
-        return {
-            **dict.fromkeys(
-                [3, 5],
-                np.dtype(
-                    [
-                        ("particleid", np.int32),
-                        ("x", np.float32),
-                        ("y", np.float32),
-                        ("zloc", np.float32),
-                        ("z", np.float32),
-                        ("time", np.float32),
-                        ("j", np.int32),
-                        ("i", np.int32),
-                        ("k", np.int32),
-                        ("cumulativetimestep", np.int32),
-                    ]
-                ),
-            ),
-            6: np.dtype(
+    dtypes = {
+        **dict.fromkeys(
+            [3, 5],
+            np.dtype(
                 [
                     ("particleid", np.int32),
-                    ("particlegroup", np.int32),
-                    ("timepointindex", np.int32),
-                    ("cumulativetimestep", np.int32),
-                    ("time", np.float32),
                     ("x", np.float32),
                     ("y", np.float32),
-                    ("z", np.float32),
-                    ("k", np.int32),
-                    ("i", np.int32),
-                    ("j", np.int32),
-                    ("grid", np.int32),
-                    ("xloc", np.float32),
-                    ("yloc", np.float32),
                     ("zloc", np.float32),
-                    ("linesegmentindex", np.int32),
-                ]
-            ),
-            7: np.dtype(
-                [
-                    ("particleid", np.int32),
-                    ("particlegroup", np.int32),
-                    ("sequencenumber", np.int32),
-                    ("particleidloc", np.int32),
-                    ("time", np.float32),
-                    ("x", np.float32),
-                    ("y", np.float32),
                     ("z", np.float32),
+                    ("time", np.float32),
+                    ("j", np.int32),
+                    ("i", np.int32),
                     ("k", np.int32),
-                    ("node", np.int32),
-                    ("xloc", np.float32),
-                    ("yloc", np.float32),
-                    ("zloc", np.float32),
-                    ("stressperiod", np.int32),
-                    ("timestep", np.int32),
+                    ("cumulativetimestep", np.int32),
                 ]
             ),
-        }
-
+        ),
+        6: np.dtype(
+            [
+                ("particleid", np.int32),
+                ("particlegroup", np.int32),
+                ("timepointindex", np.int32),
+                ("cumulativetimestep", np.int32),
+                ("time", np.float32),
+                ("x", np.float32),
+                ("y", np.float32),
+                ("z", np.float32),
+                ("k", 
np.int32), + ("i", np.int32), + ("j", np.int32), + ("grid", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("linesegmentindex", np.int32), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("sequencenumber", np.int32), + ("particleidloc", np.int32), + ("time", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("stressperiod", np.int32), + ("timestep", np.int32), + ] + ), + } + @property def dtype(self): return self._dtype @@ -459,100 +452,98 @@ class EndpointFile(ModpathFile): """ - @property - def dtypes(self): - return { - **dict.fromkeys( - [3, 5], - np.dtype( - [ - ("zone", np.int32), - ("j", np.int32), - ("i", np.int32), - ("k", np.int32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zloc", np.float32), - ("time", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("zloc0", np.float32), - ("j0", np.int32), - ("i0", np.int32), - ("k0", np.int32), - ("zone0", np.int32), - ("cumulativetimestep", np.int32), - ("ipcode", np.int32), - ("time0", np.float32), - ] - ), - ), - 6: np.dtype( + dtypes = { + **dict.fromkeys( + [3, 5], + np.dtype( [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("status", np.int32), - ("time0", np.float32), - ("time", np.float32), - ("initialgrid", np.int32), - ("k0", np.int32), - ("i0", np.int32), - ("j0", np.int32), - ("cellface0", np.int32), - ("zone0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ("finalgrid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("cellface", np.int32), ("zone", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), + ("j", np.int32), + ("i", np.int32), + ("k", np.int32), ("x", np.float32), ("y", np.float32), ("z", np.float32), - ("label", "|S40"), - ] - ), - 7: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("status", np.int32), - ("time0", np.float32), + ("zloc", np.float32), ("time", np.float32), - ("node0", np.int32), - ("k0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), ("x0", np.float32), ("y0", np.float32), - ("z0", np.float32), + ("zloc0", np.float32), + ("j0", np.int32), + ("i0", np.int32), + ("k0", np.int32), ("zone0", np.int32), - ("initialcellface", np.int32), - ("node", np.int32), - ("k", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zone", np.int32), - ("cellface", np.int32), + ("cumulativetimestep", np.int32), + ("ipcode", np.int32), + ("time0", np.float32), ] ), - } + ), + 6: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("status", np.int32), + ("time0", np.float32), + ("time", np.float32), + ("initialgrid", np.int32), + ("k0", np.int32), + ("i0", np.int32), + ("j0", np.int32), + ("cellface0", np.int32), + ("zone0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("z0", np.float32), + ("finalgrid", np.int32), + ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("cellface", np.int32), + ("zone", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", 
np.float32), + ("y", np.float32), + ("z", np.float32), + ("label", "|S40"), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("status", np.int32), + ("time0", np.float32), + ("time", np.float32), + ("node0", np.int32), + ("k0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("z0", np.float32), + ("zone0", np.int32), + ("initialcellface", np.int32), + ("node", np.int32), + ("k", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("zone", np.int32), + ("cellface", np.int32), + ] + ), + } @property def dtype(self): @@ -817,63 +808,61 @@ class TimeseriesFile(ModpathFile): >>> ts1 = tsobj.get_data(partid=1) """ - @property - def dtypes(self): - return { - **dict.fromkeys( - [3, 5], - np.dtype( - [ - ("timestepindex", np.int32), - ("particleid", np.int32), - ("node", np.int32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zloc", np.float32), - ("time", np.float32), - ("timestep", np.int32), - ] - ), - ), - 6: np.dtype( + dtypes = { + **dict.fromkeys( + [3, 5], + np.dtype( [ - ("timepointindex", np.int32), - ("timestep", np.int32), - ("time", np.float32), + ("timestepindex", np.int32), ("particleid", np.int32), - ("particlegroup", np.int32), + ("node", np.int32), ("x", np.float32), ("y", np.float32), ("z", np.float32), - ("grid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), ("zloc", np.float32), - ] - ), - 7: np.dtype( - [ - ("timepointindex", np.int32), - ("timestep", np.int32), ("time", np.float32), - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), + ("timestep", np.int32), ] ), - } + ), + 6: np.dtype( + [ + ("timepointindex", np.int32), + ("timestep", np.int32), + ("time", np.float32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("grid", np.int32), + ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ] + ), + 7: np.dtype( + [ + ("timepointindex", np.int32), + ("timestep", np.int32), + ("time", np.float32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", np.int32), + ] + ), + } @property def dtype(self): diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 186267fb7..e3fcd5653 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -9,8 +9,8 @@ from typing import Union import numpy as np -from numpy.lib.recfunctions import stack_arrays import pandas as pd +from numpy.lib.recfunctions import stack_arrays MIN_PARTICLE_TRACK_DTYPE = np.dtype( [ @@ -44,13 +44,8 @@ class ParticleTrackFile(ABC): Minimal data shared by all particle track file formats. """ - @property - @abstractmethod - def dtype(self): - """ - Particle track file data dtype. 
- """ - return MIN_PARTICLE_TRACK_DTYPE + dtypes = {"base": ..., "full": ...} + """Base and full (extended) canonical pathline data dtypes.""" def __init__( self, @@ -126,6 +121,12 @@ def get_data( return data[idx] + def get_dataframe(self) -> pd.DataFrame: + return self._data + + def get_recarray(self) -> np.recarray: + return self._data.to_records(index=False) + def get_alldata(self, totim=None, ge=True, minimal=False): """ Get all particle tracks separately, optionally filtering by time. @@ -343,8 +344,20 @@ def validate(self): for dt in dtype.values(): self.validate(dt) elif isinstance(dtype, pd.Series): - subset = OrderedDict({k: v for k, v in dtype.to_dict().items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) + subset = OrderedDict( + { + k: v + for k, v in dtype.to_dict().items() + if k in MIN_PARTICLE_TRACK_DTYPE.names + } + ) assert subset == expected elif isinstance(dtype, np.dtypes.VoidDType): - subset = OrderedDict({k: v for k, v in dtype.fields.items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) - assert subset == expected \ No newline at end of file + subset = OrderedDict( + { + k: v + for k, v in dtype.fields.items() + if k in MIN_PARTICLE_TRACK_DTYPE.names + } + ) + assert subset == expected diff --git a/flopy/utils/prtfile.py b/flopy/utils/prtfile.py index 219ef6d3b..5841f0e02 100644 --- a/flopy/utils/prtfile.py +++ b/flopy/utils/prtfile.py @@ -18,13 +18,66 @@ class PathlineFile(ParticleTrackFile): key_cols = ["imdl", "iprp", "irpt", "trelease"] """Columns making up each particle's composite key.""" + dtypes = { + "base": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("icell", np.int32), + ("izone", np.int32), + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.float32), + ("t", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("name", np.str_), + ] + ), + "full": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("k", np.int32), # canonical base dtype + ("icell", np.int32), + ("izone", np.int32), + ("idest", np.int32), # canonical full dtype + ("dest", np.str_), # canonical full dtype + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.float32), + ("t", np.float32), + ("t0", np.float32), # canonical full dtype + ("tt", np.float32), # canonical full dtype + ("time", np.float32), # canonical full dtype + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("particleid", np.int32), # canonical base dtype + ("name", np.str_), + ] + ), + } + """Base and full (extended) PRT pathline data dtypes.""" + @property def dtype(self): """ PRT track file dtype. This is loaded dynamically from the binary header file or CSV file headers. A best-effort attempt is made to add extra columns to comply with the canonical - `flopy.utils.particletrackfile.dtype`, as well as convenience columns, including release - and termination time and destination zone number and name. + `flopy.utils.particletrackfile.dtypes["base"]`, as well as convenience columns including + release and termination time, and destination zone number and name, for the full dtype. + If the loaded dtype is discovered to be different from `PrtPathlineFile.dtypes["base"]`, + a warning will be issued. Consumers of this class which expect the canonical particle track file attributes should call `validate()` to make sure the attributes were successfully loaded.