From f0074f4e81f0b9edc0a20affe5c7e725cd6d567b Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 1 Aug 2025 16:13:18 -0400 Subject: [PATCH 01/44] mfpackage and mfmodel netcdf info --- .docs/Notebooks/netcdf01_tutorial.py | 344 +++++++++++++++++++++ .docs/Notebooks/netcdf02_tutorial.py | 427 +++++++++++++++++++++++++++ etc/environment.yml | 6 +- flopy/mf6/data/mfstructure.py | 4 + flopy/mf6/mfmodel.py | 46 +++ flopy/mf6/mfpackage.py | 156 ++++++++++ 6 files changed, 980 insertions(+), 3 deletions(-) create mode 100644 .docs/Notebooks/netcdf01_tutorial.py create mode 100644 .docs/Notebooks/netcdf02_tutorial.py diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py new file mode 100644 index 0000000000..b70c22af3e --- /dev/null +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -0,0 +1,344 @@ +import sys +from pathlib import Path +from pprint import pformat +from tempfile import TemporaryDirectory + +import numpy as np +import xarray as xr + +import flopy + +print(sys.version) +print(f"flopy version: {flopy.__version__}") + +DNODATA = 3.0e30 + + +def create_sim(ws): + name = "uzf01" + perlen = [500.0] + nper = len(perlen) + nstp = [10] + tsmult = nper * [1.0] + crs = "EPSG:26916" + nlay, nrow, ncol = 100, 1, 1 + delr = 1.0 + delc = 1.0 + delv = 1.0 + top = 100.0 + botm = [top - (k + 1) * delv for k in range(nlay)] + strt = 0.5 + hk = 1.0 + laytyp = 1 + ss = 0.0 + sy = 0.1 + + tdis_rc = [] + for i in range(nper): + tdis_rc.append((perlen[i], nstp[i], tsmult[i])) + + # build MODFLOW 6 files + sim = flopy.mf6.MFSimulation( + sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws + ) + + # create tdis package + tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) + + # create iterative model solution and register the gwf model with it + nouter, ninner = 100, 10 + hclose, rclose, relax = 1.5e-6, 1e-6, 0.97 + imsgwf = flopy.mf6.ModflowIms( + sim, + print_option="SUMMARY", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="DBD", + under_relaxation_theta=0.7, + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + ) + + # create gwf model + newtonoptions = "NEWTON UNDER_RELAXATION" + gwf = flopy.mf6.ModflowGwf( + sim, + modelname=name, + newtonoptions=newtonoptions, + save_flows=True, + ) + + dis = flopy.mf6.ModflowGwfdis( + gwf, + crs=crs, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + idomain=np.ones((nlay, nrow, ncol), dtype=int), + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, strt=strt) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp, k=hk) + # storage + sto = flopy.mf6.ModflowGwfsto( + gwf, + save_flows=False, + iconvert=laytyp, + ss=ss, + sy=sy, + steady_state={0: False}, + transient={0: True}, + ) + + # ghbg + ghb_obs = {f"{name}.ghb.obs.csv": [("100_1_1", "GHB", (99, 0, 0))]} + bhead = np.full(nlay * nrow * ncol, DNODATA, dtype=float) + cond = np.full(nlay * nrow * ncol, DNODATA, dtype=float) + bhead[nlay - 1] = 1.5 + cond[nlay - 1] = 1.0 + ghb = flopy.mf6.ModflowGwfghbg( + gwf, + print_input=True, + print_flows=True, + bhead=bhead, + cond=cond, + save_flows=False, + ) + + ghb.obs.initialize( + filename=f"{name}.ghb.obs", + digits=20, + print_input=True, + continuous=ghb_obs, + ) + + # note: for specifying lake number, use fortran indexing! 
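+    # (here the observed features are UZF cells, not lakes: the integer ids
+    # and "uzf NNN" boundnames below are 1-based and match the packagedata
+    # entries defined further down)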
+ uzf_obs = { + f"{name}.uzf.obs.csv": [ + ("wc 02", "water-content", 2, 0.5), + ("wc 50", "water-content", 50, 0.5), + ("wcbn 02", "water-content", "uzf 002", 0.5), + ("wcbn 50", "water-content", "UZF 050", 0.5), + ("rch 02", "uzf-gwrch", "uzf 002"), + ("rch 50", "uzf-gwrch", "uzf 050"), + ] + } + + sd = 0.1 + vks = hk + thtr = 0.05 + thti = thtr + thts = sy + eps = 4 + uzf_pkdat = [[0, (0, 0, 0), 1, 1, sd, vks, thtr, thts, thti, eps, "uzf 001"]] + [ + [k, (k, 0, 0), 0, k + 1, sd, vks, thtr, thts, thti, eps, f"uzf {k + 1:03d}"] + for k in range(1, nlay - 1) + ] + uzf_pkdat[-1][3] = -1 + infiltration = 2.01 + uzf_spd = {0: [[0, infiltration, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]} + uzf = flopy.mf6.ModflowGwfuzf( + gwf, + print_input=True, + print_flows=True, + save_flows=True, + boundnames=True, + ntrailwaves=15, + nwavesets=40, + nuzfcells=len(uzf_pkdat), + packagedata=uzf_pkdat, + perioddata=uzf_spd, + budget_filerecord=f"{name}.uzf.bud", + budgetcsv_filerecord=f"{name}.uzf.bud.csv", + observations=uzf_obs, + filename=f"{name}.uzf", + ) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=f"{name}.bud", + head_filerecord=f"{name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "LAST"), ("BUDGET", "ALL")], + ) + + obs_lst = [] + obs_lst.append(["obs1", "head", (0, 0, 0)]) + obs_lst.append(["obs2", "head", (1, 0, 0)]) + obs_dict = {f"{name}.obs.csv": obs_lst} + obs = flopy.mf6.ModflowUtlobs(gwf, pname="head_obs", digits=20, continuous=obs_dict) + + return sim + + +def add_netcdf_vars(dataset, nc_info, dimmap): + def _data_shape(shape): + dims_l = [] + for d in shape: + dims_l.append(dimmap[d]) + + return dims_l + + for v in nc_info: + varname = nc_info[v]["varname"] + data = np.full( + _data_shape(nc_info[v]["nc_shape"]), + nc_info[v]["attrs"]["_FillValue"], + dtype=nc_info[v]["nc_type"], + ) + var_d = {varname: (nc_info[v]["nc_shape"], data)} + dataset = dataset.assign(var_d) + for a in nc_info[v]["attrs"]: + dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + + return dataset + + +temp_dir = TemporaryDirectory() +workspace = Path(temp_dir.name) + +# run the non-netcdf simulation +sim = create_sim(ws=workspace) +sim.write_simulation() +success, buff = sim.run_simulation(silent=True, report=True) +assert success, pformat(buff) + +# create directory for netcdf sim +sim.set_sim_path(workspace / "netcdf") +gwf = sim.get_model("uzf01") +gwf.name_file.nc_filerecord = "uzf01.structured.nc" +sim.write_simulation() + +# create the netcdf dataset +ds = xr.Dataset() + +# get model netcdf info +nc_info = gwf.netcdf_info() + +# update dataset with required attributes +for a in nc_info["attrs"]: + ds.attrs[a] = nc_info["attrs"][a] + +# set dimensional info +dis = gwf.modelgrid +xoff = dis.xoffset +yoff = dis.yoffset +x = xoff + dis.xycenters[0] +y = yoff + dis.xycenters[1] +z = [float(x) for x in range(1, dis.nlay + 1)] +nstp = sum(gwf.modeltime.nstp) +time = gwf.modeltime.tslen +nlay = dis.nlay +nrow = dis.nrow +ncol = dis.ncol +dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} + +# create coordinate vars +var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} +ds = ds.assign(var_d) + +# dis +dis = gwf.get_package("dis") +nc_info = dis.netcdf_info() + +# create dis dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# update data +ds["dis_delr"].values = dis.delr.get_data() +ds["dis_delc"].values = dis.delc.get_data() 
+ds["dis_top"].values = dis.top.get_data() +ds["dis_botm"].values = dis.botm.get_data() +ds["dis_idomain"].values = dis.idomain.get_data() + +# update dis to read from netcdf +with open(workspace / "netcdf" / "uzf01.dis", "w") as f: + f.write("BEGIN options\n") + f.write(" crs EPSG:26916\n") + f.write("END options\n\n") + f.write("BEGIN dimensions\n") + f.write(" NLAY 100\n") + f.write(" NROW 1\n") + f.write(" NCOL 1\n") + f.write("END dimensions\n\n") + f.write("BEGIN griddata\n") + f.write(" delr NETCDF\n") + f.write(" delc NETCDF\n") + f.write(" top NETCDF\n") + f.write(" botm NETCDF\n") + f.write(" idomain NETCDF\n") + f.write("END griddata\n") + +# npf +npf = gwf.get_package("npf") +nc_info = npf.netcdf_info() + +# create npf dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# update data +ds["npf_icelltype"].values = npf.icelltype.get_data() +ds["npf_k"].values = npf.k.get_data() + +# update npf to read from netcdf +with open(workspace / "netcdf" / "uzf01.npf", "w") as f: + f.write("BEGIN options\n") + f.write("END options\n\n") + f.write("BEGIN griddata\n") + f.write(" icelltype NETCDF\n") + f.write(" k NETCDF\n") + f.write("END griddata\n") + +# get ghbg package netcdf info +ghbg = gwf.get_package("ghbg_0") +nc_info = ghbg.netcdf_info() + +# create ghbg dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# update bhead netcdf array from flopy perioddata +for p in ghbg.bhead.get_data(): + istp = sum(gwf.modeltime.nstp[0:p]) + ds["ghbg_0_bhead"].values[istp] = ghbg.bhead.get_data()[p] + +# update cond netcdf array from flopy perioddata +for p in ghbg.cond.get_data(): + istp = sum(gwf.modeltime.nstp[0:p]) + ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] + +# write the netcdf +ds.to_netcdf( + workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4" +) + +# update ghbg to read from netcdf +with open(workspace / "netcdf/uzf01.ghbg", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" PRINT_INPUT\n") + f.write(" PRINT_FLOWS\n") + f.write(" OBS6 FILEIN uzf01.ghb.obs\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" bhead NETCDF\n") + f.write(" cond NETCDF\n") + f.write("END period 1\n") + +# TODO need extended modflow 6 to run this simulation +# run the netcdf sim +# success, buff = sim.run_simulation(silent=True, report=True) +# assert success, pformat(buff) diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py new file mode 100644 index 0000000000..4df8cbd297 --- /dev/null +++ b/.docs/Notebooks/netcdf02_tutorial.py @@ -0,0 +1,427 @@ +import sys +from pathlib import Path +from pprint import pformat +from tempfile import TemporaryDirectory + +import numpy as np +import xarray as xr + +import flopy + +print(sys.version) +print(f"flopy version: {flopy.__version__}") +# - + +DNODATA = 3.0e30 + + +def create_sim(ws): + name = "flow" + gwfname = name + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6") + tdis_rc = [(100.0, 1, 1.0), (100.0, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) + + gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True) + + # ims + hclose = 1.0e-6 + rclose = 1.0e-6 + nouter = 1000 + ninner = 100 + relax = 0.99 + imsgwf = flopy.mf6.ModflowIms( + sim, + print_option="ALL", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="NONE", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + 
linear_acceleration="CG", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + filename=f"{gwfname}.ims", + ) + + nlay = 1 + nrow = 10 + ncol = 10 + delr = 10.0 + delc = 10.0 + top = 100.0 + botm = 0.0 + + dis = flopy.mf6.ModflowGwfdis( + gwf, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + ic = flopy.mf6.ModflowGwfic(gwf, strt=100.0) + + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=False, + save_flows=True, + save_specific_discharge=True, + save_saturation=True, + icelltype=[1], + k=10.0, + ) + + sto_on = False + if sto_on: + sto = flopy.mf6.ModflowGwfsto( + gwf, + save_flows=True, + iconvert=[1], + ss=1.0e-5, + sy=0.3, + steady_state={0: True}, + transient={0: False}, + ) + + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=f"{gwfname}.bud", + head_filerecord=f"{gwfname}.hds", + headprintrecord=[("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + rch_on = False + if rch_on: + rch = flopy.mf6.ModflowGwfrcha(gwf, recharge={0: 4.79e-3}, pname="RCH-1") + + # wel + q = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + for i in range(nrow): + q[0, i, 0] = 100.0 + welconc[0, i, 0] = 100.0 + wel = flopy.mf6.ModflowGwfwelg( + gwf, + auxiliary=["concentration"], + pname="WEL-1", + q=q, + aux=welconc, + ) + + # ghb + rows = [0, 1, 2, 3] + for ipak, i in enumerate(rows): + fname = f"flow.{ipak + 1}.ghb" + pname = f"GHB-{ipak + 1}" + bhead = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + bhead[0, i, ncol - 1] = 50.0 + cond[0, i, ncol - 1] = 1000.0 + conc[0, i, ncol - 1] = 100.0 + flopy.mf6.ModflowGwfghbg( + gwf, + auxiliary=["concentration"], + filename=f"{fname}g", + pname=pname, + bhead=bhead, + cond=cond, + aux=conc, + ) + + # riv + rows = [4, 5, 6] + for ipak, i in enumerate(rows): + fname = f"flow.{ipak + 1}.riv" + pname = f"RIV-{ipak + 1}" + stage = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + rbot = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + stage[0, i, ncol - 1] = 50.0 + cond[0, i, ncol - 1] = 1000.0 + rbot[0, i, ncol - 1] = 0.0 + conc[0, i, ncol - 1] = 100.0 + riv = flopy.mf6.ModflowGwfrivg( + gwf, + auxiliary=["concentration"], + filename=f"{fname}g", + pname=pname, + stage=stage, + cond=cond, + rbot=rbot, + aux=[conc], + ) + + # drn + rows = [7, 8, 9] + for ipak, i in enumerate(rows): + fname = f"flow.{ipak + 1}.drn" + pname = f"DRN-{ipak + 1}" + elev = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + elev[0, i, ncol - 1] = 50.0 + cond[0, i, ncol - 1] = 1000.0 + conc[0, i, ncol - 1] = 100.0 + drn = flopy.mf6.modflow.ModflowGwfdrng( + gwf, + auxiliary=["concentration"], + filename=f"{fname}g", + pname=pname, + elev=elev, + cond=cond, + aux=[conc], + ) + + return sim + + +def add_netcdf_vars(dataset, nc_info, dimmap): + def _data_shape(shape): + dims_l = [] + for d in shape: + dims_l.append(dimmap[d]) + + return dims_l + + for v in nc_info: + varname = nc_info[v]["varname"] + data = np.full( + _data_shape(nc_info[v]["nc_shape"]), + 
nc_info[v]["attrs"]["_FillValue"], + dtype=nc_info[v]["nc_type"], + ) + var_d = {varname: (nc_info[v]["nc_shape"], data)} + dataset = dataset.assign(var_d) + for a in nc_info[v]["attrs"]: + dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + + return dataset + + +temp_dir = TemporaryDirectory() +workspace = Path(temp_dir.name) + +# run the non-netcdf simulation +sim = create_sim(workspace) +sim.write_simulation() +success, buff = sim.run_simulation(silent=True, report=True) +assert success, pformat(buff) + +# create directory for netcdf sim +sim.set_sim_path(workspace / "netcdf") +gwf = sim.get_model("flow") +gwf.name_file.nc_filerecord = "flow.structured.nc" +sim.write_simulation() + +# create the netcdf dataset +ds = xr.Dataset() + +# get model netcdf info +nc_info = gwf.netcdf_info() + +# update dataset with required attributes +for a in nc_info["attrs"]: + ds.attrs[a] = nc_info["attrs"][a] + +# get dim info from modelgrid +dis = gwf.modelgrid +xoff = dis.xoffset +yoff = dis.yoffset +x = xoff + dis.xycenters[0] +y = yoff + dis.xycenters[1] +z = [float(x) for x in range(1, dis.nlay + 1)] +nstp = sum(gwf.modeltime.nstp) +time = gwf.modeltime.tslen +nlay = dis.nlay +nrow = dis.nrow +ncol = dis.ncol +dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} + +# create coordinate vars +var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} +ds = ds.assign(var_d) + +# shape list for data arrays +shape = ["time", "z", "y", "x"] + +# update for welg +welg = gwf.get_package("wel-1") +nc_info = welg.netcdf_info() + +# create welg dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# update q netcdf array from flopy perioddata +for p in welg.q.get_data(): + if welg.q.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds["wel-1_q"].values[istp] = welg.q.get_data()[p] + +# update conc netcdf array from flopy perioddata +for p in welg.aux.get_data(): + if welg.aux.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds["wel-1_concentration"].values[istp] = welg.aux.get_data()[p][0] + +# update welg input to read from netcdf +with open(workspace / "netcdf" / "flow.welg", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" auxiliary CONCENTRATION\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" q NETCDF\n") + f.write(" concentration NETCDF\n") + f.write("END period 1\n\n") + +# update for ghbg +for n in range(4): + ip = n + 1 + + # get ghbg package netcdf info + ghbg = gwf.get_package(f"ghb-{ip}") + nc_info = ghbg.netcdf_info() + + # create ghbg dataset variables + ds = add_netcdf_vars(ds, nc_info, dimmap) + + # update bhead netcdf array from flopy perioddata + for p in ghbg.bhead.get_data(): + if ghbg.bhead.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"ghb-{ip}_bhead"].values[istp] = ghbg.bhead.get_data()[p] + + # update cond netcdf array from flopy perioddata + for p in ghbg.cond.get_data(): + if ghbg.cond.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"ghb-{ip}_cond"].values[istp] = ghbg.cond.get_data()[p] + + # update conc netcdf array from flopy perioddata + for p in ghbg.aux.get_data(): + if ghbg.aux.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"ghb-{ip}_concentration"].values[istp] = ghbg.aux.get_data()[p][0] + + # update ghbg input to read from netcdf + with open(workspace / "netcdf" / f"flow.{ip}.ghbg", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" 
auxiliary CONCENTRATION\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" bhead NETCDF\n") + f.write(" cond NETCDF\n") + f.write(" concentration NETCDF\n") + f.write("END period 1\n\n") + + +# update for rivg +for n in range(3): + ip = n + 1 + + # get rivg package netcdf info + rivg = gwf.get_package(f"riv-{ip}") + nc_info = rivg.netcdf_info() + + # create rivg dataset variables + ds = add_netcdf_vars(ds, nc_info, dimmap) + + # update stage netcdf array from flopy perioddata + for p in rivg.stage.get_data(): + if rivg.stage.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"riv-{ip}_stage"].values[istp] = rivg.stage.get_data()[p] + + # update cond netcdf array from flopy perioddata + for p in rivg.cond.get_data(): + if rivg.cond.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"riv-{ip}_cond"].values[istp] = rivg.cond.get_data()[p] + + # update rbot netcdf array from flopy perioddata + for p in rivg.rbot.get_data(): + if rivg.rbot.get_data()[p] is not None: + ds[f"riv-{ip}_rbot"].values[p] = rivg.rbot.get_data()[p] + + # update conc netcdf array from flopy perioddata + for p in rivg.aux.get_data(): + if rivg.aux.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"riv-{ip}_concentration"].values[istp] = rivg.aux.get_data()[p][0] + + # update rivg input to read from netcdf + with open(workspace / "netcdf" / f"flow.{ip}.rivg", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" auxiliary CONCENTRATION\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" stage NETCDF\n") + f.write(" cond NETCDF\n") + f.write(" rbot NETCDF\n") + f.write(" concentration NETCDF\n") + f.write("END period 1\n\n") + + +# update for drng +for n in range(3): + ip = n + 1 + + # get drng package netcdf info + drng = gwf.get_package(f"drn-{ip}") + nc_info = drng.netcdf_info() + + # create drng dataset variables + ds = add_netcdf_vars(ds, nc_info, dimmap) + + # update elev netcdf array from flopy perioddata + for p in drng.elev.get_data(): + if drng.elev.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"drn-{ip}_elev"].values[istp] = drng.elev.get_data()[p] + + # update cond netcdf array from flopy perioddata + for p in drng.cond.get_data(): + if drng.cond.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"drn-{ip}_cond"].values[istp] = drng.cond.get_data()[p] + + # update conc netcdf array from flopy perioddata + for p in drng.aux.get_data(): + if drng.aux.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + ds[f"drn-{ip}_concentration"].values[istp] = drng.aux.get_data()[p][0] + + # update drng input to read from netcdf + with open(workspace / "netcdf" / f"flow.{ip}.drng", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" auxiliary CONCENTRATION\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" elev NETCDF\n") + f.write(" cond NETCDF\n") + f.write(" concentration NETCDF\n") + f.write("END period 1\n\n") + +# write the netcdf +ds.to_netcdf( + workspace / "netcdf/flow.structured.nc", format="NETCDF4", engine="netcdf4" +) + +# TODO need extended modflow 6 to run this simulation +# run the netcdf sim +# success, buff = sim.run_simulation(silent=True, report=True) +# assert success, pformat(buff) diff --git a/etc/environment.yml b/etc/environment.yml index 9e1fc34b8a..2d6f654dff 100644 --- a/etc/environment.yml +++ b/etc/environment.yml @@ -16,8 +16,7 @@ dependencies: - 
Jinja2>=3.0 - tomli - tomli-w - - pip: - - git+https://github.com/MODFLOW-ORG/modflow-devtools.git + - modflow-devtools==1.7.0 # lint - cffconvert @@ -31,8 +30,8 @@ dependencies: - jupyter - jupyter_client>=8.4.0 - jupytext + - modflow-devtools==1.7.0 - pip: - - git+https://github.com/MODFLOW-ORG/modflow-devtools.git - tach - pytest!=8.1.0 - pytest-benchmark @@ -65,3 +64,4 @@ dependencies: - xmipy - h5py - scikit-learn + - xarray diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 3a44b4f9ab..9a9fddf34d 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -460,6 +460,7 @@ def __init__(self): self.parameter_name = None self.one_per_pkg = False self.jagged_array = None + self.netcdf = False def set_value(self, line, common): arr_line = line.strip().split() @@ -634,6 +635,8 @@ def set_value(self, line, common): self.one_per_pkg = bool(arr_line[1]) elif arr_line[0] == "jagged_array": self.jagged_array = arr_line[1] + elif arr_line[0] == "netcdf": + self.netcdf = arr_line[1] def get_type_string(self): return f"[{self.type_string}]" @@ -936,6 +939,7 @@ def __init__(self, data_item, model_data, package_type, dfn_list): or "nodes" in data_item.shape or len(data_item.layer_dims) > 1 ) + self.netcdf = data_item.netcdf self.num_data_items = len(data_item.data_items) self.record_within_record = False self.file_data = False diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index ab2a845f44..201bb94362 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -2194,3 +2194,49 @@ def _resolve_idomain(idomain, botm): else: return np.ones_like(botm) return idomain + + @staticmethod + def netcdf_attrs(mname, mtype, grid_type, mesh=None): + """Return dictionary of dataset (model) scoped attributes + Parameters + ---------- + mname : str + model name + mtype : str + model type + grid_type: + DiscretizationType + mesh : str + mesh type if dataset is ugrid compliant + """ + attrs = { + "modflow_grid": "", + "modflow_model": "", + } + if grid_type == DiscretizationType.DIS: + attrs["modflow_grid"] = "STRUCTURED" + elif grid_type == DiscretizationType.DISV: + attrs["modflow_grid"] = "VERTEX" + + attrs["modflow_model"] = f"{mname.upper()}: MODFLOW 6 {mtype.upper()} model" + + # supported => LAYERED + if mesh: + attrs["mesh"] = mesh + + return attrs + + def netcdf_info(self, mesh=None): + """Return dictionary of dataset (model) scoped attributes + Parameters + ---------- + mesh : str + mesh type if dataset is ugrid compliant + """ + attrs = MFModel.netcdf_attrs( + self.name, self.model_type, self.get_grid_type(), mesh + ) + + res_d = {} + res_d["attrs"] = attrs + return res_d diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 702becfccd..052176b543 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3424,6 +3424,162 @@ def plot(self, **kwargs): axes = PlotUtilities._plot_package_helper(self, **kwargs) return axes + @staticmethod + def _add_netcdf_entries( + attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1 + ): + DNODATA = 3.0e30 # MF6 DNODATA constant + FILLNA_INT32 = np.int32(-2147483647) # netcdf-fortran NF90_FILL_INT + FILLNA_DBL = 9.96920996838687e36 # netcdf-fortran NF90_FILL_DOUBLE + + if auxiliary: + auxnames = auxiliary + else: + auxnames = [] + + def _add_entry(tagname, iaux=None, layer=None): + + # netcdf variable dictionary + a = {} + + # set dict key and netcdf variable name + key = tagname + name = f"{pname}" + if iaux is not None: + key = f"{key}/{iaux}" + name = 
f"{name}_{auxiliary[iaux]}" + else: + name = f"{name}_{tagname}" + if layer is not None: + key = f"{key}/layer{layer}" + name = f"{name}_l{layer}" + + # add non-attrs to dictionary + a["varname"] = name.lower() + if (data_item.type) == DatumType.integer: + a["nc_type"] = np.int32 + elif (data_item.type) == DatumType.double_precision: + a["nc_type"] = np.float64 + dims = [] + if data_item.shape[0] == 'nodes': + if data_item.block_name == "griddata": + dims += ["x", "y", "z"] + elif data_item.block_name == "period": + dims += ["x", "y", "z", "time"] + else: + dimmap = {"nlay": "z", "nrow": "y", "ncol": "x"} + for s in data_item.shape: + for k, v in dimmap.items(): + s = s.replace(k, v) + dims.append(s) + a["nc_shape"] = dims[::-1] + + # add variable attributes dictionary + a["attrs"] = {} + a["attrs"]["modflow_input"] = (f"{mname}/{pname}/{tagname}").upper() + if iaux is not None: + a["attrs"]["modflow_iaux"] = iaux + 1 + if layer is not None: + a["attrs"]["layer"] = layer + if (data_item.type) == DatumType.integer: + a["attrs"]["_FillValue"] = FILLNA_INT32 + elif (data_item.type) == DatumType.double_precision: + if data_item.block_name == "griddata": + a["attrs"]["_FillValue"] = FILLNA_DBL + elif data_item.block_name == "period": + a["attrs"]["_FillValue"] = DNODATA + + # set dictionary + attrs[key] = a + + if data_item.layered and mesh == "LAYERED": + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate(auxnames): + for l in range(nlay): + _add_entry(data_item.name, n, l + 1) + else: + for l in range(nlay): + _add_entry(data_item.name, layer=l + 1) + else: + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate(auxnames): + _add_entry(data_item.name, iaux=n) + else: + _add_entry(data_item.name) + + @staticmethod + def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1): + from .data.mfstructure import DfnPackage, MFSimulationStructure + + attrs = {} + sim_struct = MFSimulationStructure() + + for package in MFPackage.__subclasses__(): + sim_struct.process_dfn(DfnPackage(package)) + p = DfnPackage(package) + c, sc = p.dfn_file_name.split(".")[0].split("-") + if c == mtype.lower() and sc == ptype.lower(): + sim_struct.add_package(p, model_file=False) + exit + + if ptype.lower() in sim_struct.package_struct_objs: + pso = sim_struct.package_struct_objs[ptype.lower()] + if pso.multi_package_support: + pname = f"<{ptype}name>" + else: + pname = ptype + for key, block in pso.blocks.items(): + if key != "griddata" and key != "period": + continue + for d in block.data_structures: + if block.data_structures[d].netcdf: + MFPackage._add_netcdf_entries( + attrs, + f"<{mtype}name>", + pname, + block.data_structures[d], + auxiliary, + mesh, + nlay, + ) + + res_d = {} + for k in list(attrs): + res_d[k] = attrs[k]["attrs"] + + return res_d + + def netcdf_info(self, mesh=None): + attrs = {} + + if self.dimensions.get_aux_variables(): + auxnames = list(self.dimensions.get_aux_variables()[0]) + if len(auxnames) and auxnames[0] == "auxiliary": + auxnames.pop(0) + else: + auxnames = [] + + for key, block in self.blocks.items(): + if key != "griddata" and key != "period": + continue + for dataset in block.datasets.values(): + if isinstance(dataset, mfdataarray.MFArray): + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if dataset.structure.netcdf and dataset.has_data(): + MFPackage._add_netcdf_entries( + attrs, + self.model_name, + self.package_name, + dataset.structure, + auxnames, + mesh, + 
self.model_or_sim.modelgrid.nlay, + ) + + return attrs + class MFChildPackages: """ From 10aeee5e4489f24febd0fa1c8b9b832ca5cfa43d Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 4 Aug 2025 08:04:25 -0400 Subject: [PATCH 02/44] regenerate classes in rtd workflow --- .github/workflows/rtd.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index a68d43e5fe..bd19b39e03 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -147,6 +147,10 @@ jobs: meson test --verbose --no-rebuild -C builddir echo "$(pwd)/bin" >> $GITHUB_PATH + - name: Update FloPy packages + working-directory: modflow6 + run: python -m flopy.mf6.utils.generate_classes --dfnpath doc/mf6io/mf6ivar/dfn + - name: Run tutorial and example notebooks working-directory: flopy/autotest run: pytest -v -n auto test_example_notebooks.py From da72a33cbb7388f0f90ec21970052938b4ac2987 Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 8 Aug 2025 13:43:20 -0400 Subject: [PATCH 03/44] static methods return full info dict --- .docs/Notebooks/netcdf01_tutorial.py | 20 ++++++++++++++++---- .docs/Notebooks/netcdf02_tutorial.py | 21 +++++++++++++++++---- flopy/mf6/mfmodel.py | 8 ++++---- flopy/mf6/mfpackage.py | 16 ++++++---------- 4 files changed, 43 insertions(+), 22 deletions(-) diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index b70c22af3e..435b5d1b78 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -1,6 +1,6 @@ import sys from pathlib import Path -from pprint import pformat +from pprint import pformat, pprint from tempfile import TemporaryDirectory import numpy as np @@ -14,6 +14,8 @@ DNODATA = 3.0e30 +# A FloPy simulation ASCII sim that will be updated +# use netcdf inputs def create_sim(ws): name = "uzf01" perlen = [500.0] @@ -185,6 +187,8 @@ def create_sim(ws): return sim +# A subroutine that can update an xarray dataset +# with package netcdf information stored in a dict def add_netcdf_vars(dataset, nc_info, dimmap): def _data_shape(shape): dims_l = [] @@ -196,11 +200,11 @@ def _data_shape(shape): for v in nc_info: varname = nc_info[v]["varname"] data = np.full( - _data_shape(nc_info[v]["nc_shape"]), + _data_shape(nc_info[v]["netcdf_shape"]), nc_info[v]["attrs"]["_FillValue"], - dtype=nc_info[v]["nc_type"], + dtype=nc_info[v]["xarray_type"], ) - var_d = {varname: (nc_info[v]["nc_shape"], data)} + var_d = {varname: (nc_info[v]["netcdf_shape"], data)} dataset = dataset.assign(var_d) for a in nc_info[v]["attrs"]: dataset[varname].attrs[a] = nc_info[v]["attrs"][a] @@ -208,6 +212,7 @@ def _data_shape(shape): return dataset +# create temporary directories temp_dir = TemporaryDirectory() workspace = Path(temp_dir.name) @@ -228,6 +233,7 @@ def _data_shape(shape): # get model netcdf info nc_info = gwf.netcdf_info() +pprint(nc_info) # update dataset with required attributes for a in nc_info["attrs"]: @@ -254,6 +260,7 @@ def _data_shape(shape): # dis dis = gwf.get_package("dis") nc_info = dis.netcdf_info() +pprint(nc_info) # create dis dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -286,6 +293,7 @@ def _data_shape(shape): # npf npf = gwf.get_package("npf") nc_info = npf.netcdf_info() +pprint(nc_info) # create npf dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -306,6 +314,7 @@ def _data_shape(shape): # get ghbg package netcdf info ghbg = gwf.get_package("ghbg_0") nc_info = ghbg.netcdf_info() +pprint(nc_info) # create ghbg dataset variables ds = 
add_netcdf_vars(ds, nc_info, dimmap) @@ -320,6 +329,9 @@ def _data_shape(shape): istp = sum(gwf.modeltime.nstp[0:p]) ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] +# show the dataset +print(ds) + # write the netcdf ds.to_netcdf( workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4" diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py index 4df8cbd297..6581650389 100644 --- a/.docs/Notebooks/netcdf02_tutorial.py +++ b/.docs/Notebooks/netcdf02_tutorial.py @@ -1,6 +1,6 @@ import sys from pathlib import Path -from pprint import pformat +from pprint import pformat, pprint from tempfile import TemporaryDirectory import numpy as np @@ -15,6 +15,8 @@ DNODATA = 3.0e30 +# A FloPy simulation ASCII sim that will be updated +# use netcdf inputs def create_sim(ws): name = "flow" gwfname = name @@ -186,6 +188,8 @@ def create_sim(ws): return sim +# A subroutine that can update an xarray dataset +# with package netcdf information stored in a dict def add_netcdf_vars(dataset, nc_info, dimmap): def _data_shape(shape): dims_l = [] @@ -197,11 +201,11 @@ def _data_shape(shape): for v in nc_info: varname = nc_info[v]["varname"] data = np.full( - _data_shape(nc_info[v]["nc_shape"]), + _data_shape(nc_info[v]["netcdf_shape"]), nc_info[v]["attrs"]["_FillValue"], - dtype=nc_info[v]["nc_type"], + dtype=nc_info[v]["xarray_type"], ) - var_d = {varname: (nc_info[v]["nc_shape"], data)} + var_d = {varname: (nc_info[v]["netcdf_shape"], data)} dataset = dataset.assign(var_d) for a in nc_info[v]["attrs"]: dataset[varname].attrs[a] = nc_info[v]["attrs"][a] @@ -209,6 +213,7 @@ def _data_shape(shape): return dataset +# create temporary directories temp_dir = TemporaryDirectory() workspace = Path(temp_dir.name) @@ -229,6 +234,7 @@ def _data_shape(shape): # get model netcdf info nc_info = gwf.netcdf_info() +pprint(nc_info) # update dataset with required attributes for a in nc_info["attrs"]: @@ -258,6 +264,7 @@ def _data_shape(shape): # update for welg welg = gwf.get_package("wel-1") nc_info = welg.netcdf_info() +pprint(nc_info) # create welg dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -292,6 +299,7 @@ def _data_shape(shape): # get ghbg package netcdf info ghbg = gwf.get_package(f"ghb-{ip}") nc_info = ghbg.netcdf_info() + pprint(nc_info) # create ghbg dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -334,6 +342,7 @@ def _data_shape(shape): # get rivg package netcdf info rivg = gwf.get_package(f"riv-{ip}") nc_info = rivg.netcdf_info() + pprint(nc_info) # create rivg dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -382,6 +391,7 @@ def _data_shape(shape): # get drng package netcdf info drng = gwf.get_package(f"drn-{ip}") nc_info = drng.netcdf_info() + pprint(nc_info) # create drng dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) @@ -416,6 +426,9 @@ def _data_shape(shape): f.write(" concentration NETCDF\n") f.write("END period 1\n\n") +# show the dataset +print(ds) + # write the netcdf ds.to_netcdf( workspace / "netcdf/flow.structured.nc", format="NETCDF4", engine="netcdf4" diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 201bb94362..56d6e1a8a3 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -2196,14 +2196,14 @@ def _resolve_idomain(idomain, botm): return idomain @staticmethod - def netcdf_attrs(mname, mtype, grid_type, mesh=None): + def netcdf_model(mname, mtype, grid_type, mesh=None): """Return dictionary of dataset (model) scoped attributes Parameters ---------- mname : str 
model name mtype : str - model type + model type, e.g. GWF6 grid_type: DiscretizationType mesh : str @@ -2218,7 +2218,7 @@ def netcdf_attrs(mname, mtype, grid_type, mesh=None): elif grid_type == DiscretizationType.DISV: attrs["modflow_grid"] = "VERTEX" - attrs["modflow_model"] = f"{mname.upper()}: MODFLOW 6 {mtype.upper()} model" + attrs["modflow_model"] = f"{mtype.upper()}: {mname.upper()}" # supported => LAYERED if mesh: @@ -2233,7 +2233,7 @@ def netcdf_info(self, mesh=None): mesh : str mesh type if dataset is ugrid compliant """ - attrs = MFModel.netcdf_attrs( + attrs = MFModel.netcdf_model( self.name, self.model_type, self.get_grid_type(), mesh ) diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 052176b543..d559ee1d17 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3457,9 +3457,9 @@ def _add_entry(tagname, iaux=None, layer=None): # add non-attrs to dictionary a["varname"] = name.lower() if (data_item.type) == DatumType.integer: - a["nc_type"] = np.int32 + a["xarray_type"] = np.int32 elif (data_item.type) == DatumType.double_precision: - a["nc_type"] = np.float64 + a["xarray_type"] = np.float64 dims = [] if data_item.shape[0] == 'nodes': if data_item.block_name == "griddata": @@ -3472,7 +3472,7 @@ def _add_entry(tagname, iaux=None, layer=None): for k, v in dimmap.items(): s = s.replace(k, v) dims.append(s) - a["nc_shape"] = dims[::-1] + a["netcdf_shape"] = dims[::-1] # add variable attributes dictionary a["attrs"] = {} @@ -3492,7 +3492,7 @@ def _add_entry(tagname, iaux=None, layer=None): # set dictionary attrs[key] = a - if data_item.layered and mesh == "LAYERED": + if data_item.layered and mesh and mesh.upper() == "LAYERED": if data_item.name == "aux" or data_item.name == "auxvar": for n, auxname in enumerate(auxnames): for l in range(nlay): @@ -3508,7 +3508,7 @@ def _add_entry(tagname, iaux=None, layer=None): _add_entry(data_item.name) @staticmethod - def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1): + def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): from .data.mfstructure import DfnPackage, MFSimulationStructure attrs = {} @@ -3543,11 +3543,7 @@ def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1): nlay, ) - res_d = {} - for k in list(attrs): - res_d[k] = attrs[k]["attrs"] - - return res_d + return attrs def netcdf_info(self, mesh=None): attrs = {} From 18cc45cef588df2a7d16464c97496baad24e5204 Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 8 Aug 2025 14:20:06 -0400 Subject: [PATCH 04/44] make model dicts consistent --- flopy/mf6/mfmodel.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 56d6e1a8a3..621887121c 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -2222,9 +2222,9 @@ def netcdf_model(mname, mtype, grid_type, mesh=None): # supported => LAYERED if mesh: - attrs["mesh"] = mesh + attrs["mesh"] = mesh.upper() - return attrs + return {"attrs": attrs} def netcdf_info(self, mesh=None): """Return dictionary of dataset (model) scoped attributes @@ -2233,10 +2233,6 @@ def netcdf_info(self, mesh=None): mesh : str mesh type if dataset is ugrid compliant """ - attrs = MFModel.netcdf_model( + return MFModel.netcdf_model( self.name, self.model_type, self.get_grid_type(), mesh ) - - res_d = {} - res_d["attrs"] = attrs - return res_d From a546f740ecb1029a7456a78f090866eb12b754cf Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 11 Aug 2025 09:00:58 -0400 Subject: [PATCH 05/44] remove second tutorial for now --- 
.docs/Notebooks/netcdf01_tutorial.py | 144 ++++++++- .docs/Notebooks/netcdf02_tutorial.py | 440 --------------------------- DEVELOPER.md | 4 +- 3 files changed, 131 insertions(+), 457 deletions(-) delete mode 100644 .docs/Notebooks/netcdf02_tutorial.py diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index 435b5d1b78..31bc974e5a 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -1,3 +1,39 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:light +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.17.2 +# kernelspec: +# display_name: Python 3 +# language: python +# name: python3 +# metadata: +# section: mf6 +# --- + +# # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim +# +# ## NetCDF tutorial 1: MODFLOW 6 structured input file +# +# This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file from +# an existing FloPy simulation. In the tutorial, candidate array data is +# added to an xarray dataset and annotated so that the generated NetCDF +# file can be read by MODFLOW 6 as model input. +# +# This tutorial generates a structured NetCDF variant - for more information +# on supported MODFLOW 6 NetCDF formats see: +# [MODFLOW NetCDF Format](https://github.com/MODFLOW-ORG/modflow6/wiki/MODFLOW-NetCDF-Format). +# +# Note that NetCDF is only supported by the Extended version of MODFLOW 6. +# A nightly windows build of Extended MODFLOW 6 is available from +# [nightly build](https://github.com/MODFLOW-ORG/modflow6-nightly-build). + +# package import import sys from pathlib import Path from pprint import pformat, pprint @@ -11,11 +47,29 @@ print(sys.version) print(f"flopy version: {flopy.__version__}") +# ## Define DNODATA constant +# +# DNODATA is an important constant for MODFLOW 6 timeseries grid input +# data. It signifies that the cell has no data defined for the time step +# in question. These cell values are discared and have no impact on the +# simulation. + +# DNODATA constant DNODATA = 3.0e30 +# ## Define ASCII input baseline simulation +# +# For the purposes of this tutorial, the specifics of this simulation +# other than it is a candidate for NetCDF input are not a focus. It +# is a NetCDF input candidate because it defines a candidate model type +# (GWF6) with packages that support NetCDF input parameters. +# +# A NetCDF dataset will be created from array data in the DIS, NPF and +# GHBG packages. Data will be copied from the package objects into dataset +# arrays. + -# A FloPy simulation ASCII sim that will be updated -# use netcdf inputs +# A FloPy ASCII base simulation that will be updated use netcdf inputs def create_sim(ws): name = "uzf01" perlen = [500.0] @@ -187,8 +241,16 @@ def create_sim(ws): return sim -# A subroutine that can update an xarray dataset -# with package netcdf information stored in a dict +# ## Create helper function to update dataset +# +# This function updates an Xarray dataset to add variables described +# in a FloPy provided dictionary. +# +# A dimension map variable relates FloPy and NetCDF dimensions names. 
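+#
+# For orientation, a single entry of the ``nc_info`` dict consumed by this
+# helper is expected to look roughly like the sketch below (keys as returned
+# by ``netcdf_info()``; names, shapes, and attributes vary by variable):
+#
+#     {"delr": {"varname": "dis_delr",
+#               "xarray_type": np.float64,
+#               "netcdf_shape": ["x"],
+#               "attrs": {"modflow_input": "UZF01/DIS/DELR",
+#                         "_FillValue": 9.96920996838687e+36}}}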
+ + +# A subroutine that can update an xarray dataset with package +# netcdf information stored in a dict def add_netcdf_vars(dataset, nc_info, dimmap): def _data_shape(shape): dims_l = [] @@ -223,14 +285,25 @@ def _data_shape(shape): assert success, pformat(buff) # create directory for netcdf sim +# set model name file nc_filerecord attribute to export name sim.set_sim_path(workspace / "netcdf") gwf = sim.get_model("uzf01") gwf.name_file.nc_filerecord = "uzf01.structured.nc" sim.write_simulation() -# create the netcdf dataset +# create the dataset ds = xr.Dataset() +# ## Access model NetCDF attributes +# +# Access model scoped NetCDF details by storing the dictionary +# returned from netcdf_info(). In particular, we need to set dataset +# scoped attributes that are stored in the model netcdf info dict. +# +# First, retrieve and store the netcdf info dictionary and display +# its contents. Then, in the following step, update the dataset with +# the model scoped attributes defined in the dictionary. + # get model netcdf info nc_info = gwf.netcdf_info() pprint(nc_info) @@ -239,7 +312,7 @@ def _data_shape(shape): for a in nc_info["attrs"]: ds.attrs[a] = nc_info["attrs"][a] -# set dimensional info +# define dimensional info dis = gwf.modelgrid xoff = dis.xoffset yoff = dis.yoffset @@ -253,11 +326,22 @@ def _data_shape(shape): ncol = dis.ncol dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} -# create coordinate vars +# create dataset coordinate vars var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} ds = ds.assign(var_d) -# dis +# ## Access package NetCDF attributes +# +# Access package scoped NetCDF details by storing the dictionary returned +# from netcdf_info(). We need to set package variable attributes that are +# stored in the package netcdf info dict, but we also need other information +# that is relevant to creating the variables themselves. +# +# The contents of the info dictionary are shown and then, in the following +# step, the dictionary and the dataset are passed to a helper routine that +# create the intended array variables. + +# get dis package netcdf info dis = gwf.get_package("dis") nc_info = dis.netcdf_info() pprint(nc_info) @@ -265,14 +349,30 @@ def _data_shape(shape): # create dis dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) -# update data +# ## Update array data +# +# We have created dataset array variables for the package but they do not yet +# define the expected input data for MODFLOW 6. We will take advantage of the +# existing simulation objects and update the dataset. + +# update dataset from dis arrays ds["dis_delr"].values = dis.delr.get_data() ds["dis_delc"].values = dis.delc.get_data() ds["dis_top"].values = dis.top.get_data() ds["dis_botm"].values = dis.botm.get_data() ds["dis_idomain"].values = dis.idomain.get_data() -# update dis to read from netcdf +# ## Update MODFLOW 6 package input file +# +# MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF +# file is generated, we need to configure MODFLOW 6 so that it looks to that +# file for the package array input. The ASCII will no longer defined the arrays- +# instead the array names will be followed by the NETCDF keyword. +# +# We will simply overwrite the entire MODFLOW 6 DIS package input file with the +# following code block. 
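+#
+# (each griddata keyword tagged NETCDF directs Extended MODFLOW 6 to source
+# that array from the model's NetCDF input file declared in the name file)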
+ +# rewrite mf6 dis input to read from netcdf with open(workspace / "netcdf" / "uzf01.dis", "w") as f: f.write("BEGIN options\n") f.write(" crs EPSG:26916\n") @@ -290,7 +390,11 @@ def _data_shape(shape): f.write(" idomain NETCDF\n") f.write("END griddata\n") -# npf +# ## Update MODFLOW 6 package input file +# +# Follow the same process as above for the NPF package. + +# get npf package netcdf info npf = gwf.get_package("npf") nc_info = npf.netcdf_info() pprint(nc_info) @@ -298,11 +402,11 @@ def _data_shape(shape): # create npf dataset variables ds = add_netcdf_vars(ds, nc_info, dimmap) -# update data +# update dataset from npf arrays ds["npf_icelltype"].values = npf.icelltype.get_data() ds["npf_k"].values = npf.k.get_data() -# update npf to read from netcdf +# rewrite mf6 npf input to read from netcdf with open(workspace / "netcdf" / "uzf01.npf", "w") as f: f.write("BEGIN options\n") f.write("END options\n\n") @@ -311,6 +415,14 @@ def _data_shape(shape): f.write(" k NETCDF\n") f.write("END griddata\n") +# ## Update MODFLOW 6 package input file +# +# Follow the same process as above for the GHBG package. The difference is +# that this is PERIOD input and therefore stored as timeseries data in the +# NetCDF file. As NETCDF timeseries and defined in terms of total number of +# simulation steps, care must be taken in the translation of FloPy period +# data to the timeseries. + # get ghbg package netcdf info ghbg = gwf.get_package("ghbg_0") nc_info = ghbg.netcdf_info() @@ -320,11 +432,13 @@ def _data_shape(shape): ds = add_netcdf_vars(ds, nc_info, dimmap) # update bhead netcdf array from flopy perioddata +# timeseries step index is first of stress period for p in ghbg.bhead.get_data(): istp = sum(gwf.modeltime.nstp[0:p]) ds["ghbg_0_bhead"].values[istp] = ghbg.bhead.get_data()[p] # update cond netcdf array from flopy perioddata +# timeseries step index is first of stress period for p in ghbg.cond.get_data(): istp = sum(gwf.modeltime.nstp[0:p]) ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] @@ -332,12 +446,12 @@ def _data_shape(shape): # show the dataset print(ds) -# write the netcdf +# write dataset to netcdf ds.to_netcdf( workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4" ) -# update ghbg to read from netcdf +# rewrite mf6 ghbg input to read from netcdf with open(workspace / "netcdf/uzf01.ghbg", "w") as f: f.write("BEGIN options\n") f.write(" READARRAYGRID\n") diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py deleted file mode 100644 index 6581650389..0000000000 --- a/.docs/Notebooks/netcdf02_tutorial.py +++ /dev/null @@ -1,440 +0,0 @@ -import sys -from pathlib import Path -from pprint import pformat, pprint -from tempfile import TemporaryDirectory - -import numpy as np -import xarray as xr - -import flopy - -print(sys.version) -print(f"flopy version: {flopy.__version__}") -# - - -DNODATA = 3.0e30 - - -# A FloPy simulation ASCII sim that will be updated -# use netcdf inputs -def create_sim(ws): - name = "flow" - gwfname = name - sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6") - tdis_rc = [(100.0, 1, 1.0), (100.0, 1, 1.0)] - nper = len(tdis_rc) - tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) - - gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True) - - # ims - hclose = 1.0e-6 - rclose = 1.0e-6 - nouter = 1000 - ninner = 100 - relax = 0.99 - imsgwf = flopy.mf6.ModflowIms( - sim, - print_option="ALL", - outer_dvclose=hclose, - outer_maximum=nouter, 
- under_relaxation="NONE", - inner_maximum=ninner, - inner_dvclose=hclose, - rcloserecord=rclose, - linear_acceleration="CG", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=relax, - filename=f"{gwfname}.ims", - ) - - nlay = 1 - nrow = 10 - ncol = 10 - delr = 10.0 - delc = 10.0 - top = 100.0 - botm = 0.0 - - dis = flopy.mf6.ModflowGwfdis( - gwf, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - ic = flopy.mf6.ModflowGwfic(gwf, strt=100.0) - - npf = flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=False, - save_flows=True, - save_specific_discharge=True, - save_saturation=True, - icelltype=[1], - k=10.0, - ) - - sto_on = False - if sto_on: - sto = flopy.mf6.ModflowGwfsto( - gwf, - save_flows=True, - iconvert=[1], - ss=1.0e-5, - sy=0.3, - steady_state={0: True}, - transient={0: False}, - ) - - oc = flopy.mf6.ModflowGwfoc( - gwf, - budget_filerecord=f"{gwfname}.bud", - head_filerecord=f"{gwfname}.hds", - headprintrecord=[("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - rch_on = False - if rch_on: - rch = flopy.mf6.ModflowGwfrcha(gwf, recharge={0: 4.79e-3}, pname="RCH-1") - - # wel - q = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - for i in range(nrow): - q[0, i, 0] = 100.0 - welconc[0, i, 0] = 100.0 - wel = flopy.mf6.ModflowGwfwelg( - gwf, - auxiliary=["concentration"], - pname="WEL-1", - q=q, - aux=welconc, - ) - - # ghb - rows = [0, 1, 2, 3] - for ipak, i in enumerate(rows): - fname = f"flow.{ipak + 1}.ghb" - pname = f"GHB-{ipak + 1}" - bhead = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - bhead[0, i, ncol - 1] = 50.0 - cond[0, i, ncol - 1] = 1000.0 - conc[0, i, ncol - 1] = 100.0 - flopy.mf6.ModflowGwfghbg( - gwf, - auxiliary=["concentration"], - filename=f"{fname}g", - pname=pname, - bhead=bhead, - cond=cond, - aux=conc, - ) - - # riv - rows = [4, 5, 6] - for ipak, i in enumerate(rows): - fname = f"flow.{ipak + 1}.riv" - pname = f"RIV-{ipak + 1}" - stage = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - rbot = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - stage[0, i, ncol - 1] = 50.0 - cond[0, i, ncol - 1] = 1000.0 - rbot[0, i, ncol - 1] = 0.0 - conc[0, i, ncol - 1] = 100.0 - riv = flopy.mf6.ModflowGwfrivg( - gwf, - auxiliary=["concentration"], - filename=f"{fname}g", - pname=pname, - stage=stage, - cond=cond, - rbot=rbot, - aux=[conc], - ) - - # drn - rows = [7, 8, 9] - for ipak, i in enumerate(rows): - fname = f"flow.{ipak + 1}.drn" - pname = f"DRN-{ipak + 1}" - elev = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - conc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - elev[0, i, ncol - 1] = 50.0 - cond[0, i, ncol - 1] = 1000.0 - conc[0, i, ncol - 1] = 100.0 - drn = flopy.mf6.modflow.ModflowGwfdrng( - gwf, - auxiliary=["concentration"], - filename=f"{fname}g", - pname=pname, - elev=elev, - cond=cond, - aux=[conc], - ) - - return sim - - -# A subroutine that can update an xarray dataset -# with package netcdf information stored in a dict -def add_netcdf_vars(dataset, nc_info, dimmap): - def _data_shape(shape): - dims_l = 
[] - for d in shape: - dims_l.append(dimmap[d]) - - return dims_l - - for v in nc_info: - varname = nc_info[v]["varname"] - data = np.full( - _data_shape(nc_info[v]["netcdf_shape"]), - nc_info[v]["attrs"]["_FillValue"], - dtype=nc_info[v]["xarray_type"], - ) - var_d = {varname: (nc_info[v]["netcdf_shape"], data)} - dataset = dataset.assign(var_d) - for a in nc_info[v]["attrs"]: - dataset[varname].attrs[a] = nc_info[v]["attrs"][a] - - return dataset - - -# create temporary directories -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) - -# run the non-netcdf simulation -sim = create_sim(workspace) -sim.write_simulation() -success, buff = sim.run_simulation(silent=True, report=True) -assert success, pformat(buff) - -# create directory for netcdf sim -sim.set_sim_path(workspace / "netcdf") -gwf = sim.get_model("flow") -gwf.name_file.nc_filerecord = "flow.structured.nc" -sim.write_simulation() - -# create the netcdf dataset -ds = xr.Dataset() - -# get model netcdf info -nc_info = gwf.netcdf_info() -pprint(nc_info) - -# update dataset with required attributes -for a in nc_info["attrs"]: - ds.attrs[a] = nc_info["attrs"][a] - -# get dim info from modelgrid -dis = gwf.modelgrid -xoff = dis.xoffset -yoff = dis.yoffset -x = xoff + dis.xycenters[0] -y = yoff + dis.xycenters[1] -z = [float(x) for x in range(1, dis.nlay + 1)] -nstp = sum(gwf.modeltime.nstp) -time = gwf.modeltime.tslen -nlay = dis.nlay -nrow = dis.nrow -ncol = dis.ncol -dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} - -# create coordinate vars -var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} -ds = ds.assign(var_d) - -# shape list for data arrays -shape = ["time", "z", "y", "x"] - -# update for welg -welg = gwf.get_package("wel-1") -nc_info = welg.netcdf_info() -pprint(nc_info) - -# create welg dataset variables -ds = add_netcdf_vars(ds, nc_info, dimmap) - -# update q netcdf array from flopy perioddata -for p in welg.q.get_data(): - if welg.q.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds["wel-1_q"].values[istp] = welg.q.get_data()[p] - -# update conc netcdf array from flopy perioddata -for p in welg.aux.get_data(): - if welg.aux.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds["wel-1_concentration"].values[istp] = welg.aux.get_data()[p][0] - -# update welg input to read from netcdf -with open(workspace / "netcdf" / "flow.welg", "w") as f: - f.write("BEGIN options\n") - f.write(" READARRAYGRID\n") - f.write(" auxiliary CONCENTRATION\n") - f.write("END options\n\n") - f.write("BEGIN period 1\n") - f.write(" q NETCDF\n") - f.write(" concentration NETCDF\n") - f.write("END period 1\n\n") - -# update for ghbg -for n in range(4): - ip = n + 1 - - # get ghbg package netcdf info - ghbg = gwf.get_package(f"ghb-{ip}") - nc_info = ghbg.netcdf_info() - pprint(nc_info) - - # create ghbg dataset variables - ds = add_netcdf_vars(ds, nc_info, dimmap) - - # update bhead netcdf array from flopy perioddata - for p in ghbg.bhead.get_data(): - if ghbg.bhead.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"ghb-{ip}_bhead"].values[istp] = ghbg.bhead.get_data()[p] - - # update cond netcdf array from flopy perioddata - for p in ghbg.cond.get_data(): - if ghbg.cond.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"ghb-{ip}_cond"].values[istp] = ghbg.cond.get_data()[p] - - # update conc netcdf array from flopy perioddata - for p in ghbg.aux.get_data(): - if ghbg.aux.get_data()[p] is not None: - istp = 
sum(gwf.modeltime.nstp[0:p]) - ds[f"ghb-{ip}_concentration"].values[istp] = ghbg.aux.get_data()[p][0] - - # update ghbg input to read from netcdf - with open(workspace / "netcdf" / f"flow.{ip}.ghbg", "w") as f: - f.write("BEGIN options\n") - f.write(" READARRAYGRID\n") - f.write(" auxiliary CONCENTRATION\n") - f.write("END options\n\n") - f.write("BEGIN period 1\n") - f.write(" bhead NETCDF\n") - f.write(" cond NETCDF\n") - f.write(" concentration NETCDF\n") - f.write("END period 1\n\n") - - -# update for rivg -for n in range(3): - ip = n + 1 - - # get rivg package netcdf info - rivg = gwf.get_package(f"riv-{ip}") - nc_info = rivg.netcdf_info() - pprint(nc_info) - - # create rivg dataset variables - ds = add_netcdf_vars(ds, nc_info, dimmap) - - # update stage netcdf array from flopy perioddata - for p in rivg.stage.get_data(): - if rivg.stage.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"riv-{ip}_stage"].values[istp] = rivg.stage.get_data()[p] - - # update cond netcdf array from flopy perioddata - for p in rivg.cond.get_data(): - if rivg.cond.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"riv-{ip}_cond"].values[istp] = rivg.cond.get_data()[p] - - # update rbot netcdf array from flopy perioddata - for p in rivg.rbot.get_data(): - if rivg.rbot.get_data()[p] is not None: - ds[f"riv-{ip}_rbot"].values[p] = rivg.rbot.get_data()[p] - - # update conc netcdf array from flopy perioddata - for p in rivg.aux.get_data(): - if rivg.aux.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"riv-{ip}_concentration"].values[istp] = rivg.aux.get_data()[p][0] - - # update rivg input to read from netcdf - with open(workspace / "netcdf" / f"flow.{ip}.rivg", "w") as f: - f.write("BEGIN options\n") - f.write(" READARRAYGRID\n") - f.write(" auxiliary CONCENTRATION\n") - f.write("END options\n\n") - f.write("BEGIN period 1\n") - f.write(" stage NETCDF\n") - f.write(" cond NETCDF\n") - f.write(" rbot NETCDF\n") - f.write(" concentration NETCDF\n") - f.write("END period 1\n\n") - - -# update for drng -for n in range(3): - ip = n + 1 - - # get drng package netcdf info - drng = gwf.get_package(f"drn-{ip}") - nc_info = drng.netcdf_info() - pprint(nc_info) - - # create drng dataset variables - ds = add_netcdf_vars(ds, nc_info, dimmap) - - # update elev netcdf array from flopy perioddata - for p in drng.elev.get_data(): - if drng.elev.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"drn-{ip}_elev"].values[istp] = drng.elev.get_data()[p] - - # update cond netcdf array from flopy perioddata - for p in drng.cond.get_data(): - if drng.cond.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"drn-{ip}_cond"].values[istp] = drng.cond.get_data()[p] - - # update conc netcdf array from flopy perioddata - for p in drng.aux.get_data(): - if drng.aux.get_data()[p] is not None: - istp = sum(gwf.modeltime.nstp[0:p]) - ds[f"drn-{ip}_concentration"].values[istp] = drng.aux.get_data()[p][0] - - # update drng input to read from netcdf - with open(workspace / "netcdf" / f"flow.{ip}.drng", "w") as f: - f.write("BEGIN options\n") - f.write(" READARRAYGRID\n") - f.write(" auxiliary CONCENTRATION\n") - f.write("END options\n\n") - f.write("BEGIN period 1\n") - f.write(" elev NETCDF\n") - f.write(" cond NETCDF\n") - f.write(" concentration NETCDF\n") - f.write("END period 1\n\n") - -# show the dataset -print(ds) - -# write the netcdf -ds.to_netcdf( - workspace / "netcdf/flow.structured.nc", format="NETCDF4", engine="netcdf4" -) - -# 
TODO need extended modflow 6 to run this simulation -# run the netcdf sim -# success, buff = sim.run_simulation(silent=True, report=True) -# assert success, pformat(buff) diff --git a/DEVELOPER.md b/DEVELOPER.md index 0f0943a75e..93aad9d022 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -223,8 +223,8 @@ All tutorials and examples should include a header with the following format: Contents above the `metadata` attribute can be auto-generated with `jupytext` by first-converting an example script to a notebook, and then back to a script (i.e. a round-trip conversion). For instance: ```sh -jupytext --from py --to ipynb .docs/Notebooks/your_example.py -jupytext --from ipynb --to py .docs/Notebooks/your_example.ipynb +jupytext --from py --to ipynb --set-formats ipynb,py:light .docs/Notebooks/your_example.py +jupytext --from ipynb --to py --set-formats ipynb,py:light .docs/Notebooks/your_example.ipynb ``` The `metadata` attribute should be filled by the example developer, and should contain at minimum: From 836571ef71543891f0cf8e06a466bddb1c21eba4 Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 11 Aug 2025 09:59:49 -0400 Subject: [PATCH 06/44] more tutorial doc --- .docs/Notebooks/netcdf01_tutorial.py | 57 +++++++++++++++++++++------- 1 file changed, 43 insertions(+), 14 deletions(-) diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index 31bc974e5a..bd47849b6b 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -9,7 +9,7 @@ # format_version: '1.5' # jupytext_version: 1.17.2 # kernelspec: -# display_name: Python 3 +# display_name: Python 3 (ipykernel) # language: python # name: python3 # metadata: @@ -47,11 +47,11 @@ print(sys.version) print(f"flopy version: {flopy.__version__}") -# ## Define DNODATA constant +# ## Define `DNODATA` constant # -# DNODATA is an important constant for MODFLOW 6 timeseries grid input +# `DNODATA` is an important constant for MODFLOW 6 timeseries grid input # data. It signifies that the cell has no data defined for the time step -# in question. These cell values are discared and have no impact on the +# in question. These cell values are discarded and have no impact on the # simulation. # DNODATA constant @@ -62,10 +62,10 @@ # For the purposes of this tutorial, the specifics of this simulation # other than it is a candidate for NetCDF input are not a focus. It # is a NetCDF input candidate because it defines a candidate model type -# (GWF6) with packages that support NetCDF input parameters. +# (`GWF6`) with packages that support NetCDF input parameters. # -# A NetCDF dataset will be created from array data in the DIS, NPF and -# GHBG packages. Data will be copied from the package objects into dataset +# A NetCDF dataset will be created from array data in the `DIS`, `NPF` and +# `GHBG` packages. Data will be copied from the package objects into dataset # arrays. @@ -284,12 +284,22 @@ def _data_shape(shape): success, buff = sim.run_simulation(silent=True, report=True) assert success, pformat(buff) +# ## Create NetCDF based simulation +# +# Reset the simulation path and set the GWF name file `nc_filerecord` +# attribute to the name of the intended input NetCDF file. Display +# the resultant name file changes. 
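+#
+# With `nc_filerecord` set, the rewritten name file is expected to carry a
+# NetCDF entry in its options block along these lines (a sketch - see the
+# Extended MODFLOW 6 input specification for the exact keywords):
+#
+#     BEGIN options
+#       NETCDF  FILEIN  uzf01.structured.nc
+#     END options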
+ # create directory for netcdf sim # set model name file nc_filerecord attribute to export name sim.set_sim_path(workspace / "netcdf") gwf = sim.get_model("uzf01") gwf.name_file.nc_filerecord = "uzf01.structured.nc" sim.write_simulation() +with open(workspace / "netcdf" / "uzf01.nam", "r") as fh: + print(fh.read()) + +# ## Create dataset # create the dataset ds = xr.Dataset() @@ -297,7 +307,7 @@ def _data_shape(shape): # ## Access model NetCDF attributes # # Access model scoped NetCDF details by storing the dictionary -# returned from netcdf_info(). In particular, we need to set dataset +# returned from `netcdf_info()`. In particular, we need to set dataset # scoped attributes that are stored in the model netcdf info dict. # # First, retrieve and store the netcdf info dictionary and display @@ -312,6 +322,8 @@ def _data_shape(shape): for a in nc_info["attrs"]: ds.attrs[a] = nc_info["attrs"][a] +# ## Define dimensions relevant to NetCDF input file + # define dimensional info dis = gwf.modelgrid xoff = dis.xoffset @@ -326,6 +338,8 @@ def _data_shape(shape): ncol = dis.ncol dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} +# ## Create dataset dimensions + # create dataset coordinate vars var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} ds = ds.assign(var_d) @@ -333,7 +347,7 @@ def _data_shape(shape): # ## Access package NetCDF attributes # # Access package scoped NetCDF details by storing the dictionary returned -# from netcdf_info(). We need to set package variable attributes that are +# from `netcdf_info()`. We need to set package variable attributes that are # stored in the package netcdf info dict, but we also need other information # that is relevant to creating the variables themselves. # @@ -369,7 +383,7 @@ def _data_shape(shape): # file for the package array input. The ASCII will no longer defined the arrays- # instead the array names will be followed by the NETCDF keyword. # -# We will simply overwrite the entire MODFLOW 6 DIS package input file with the +# We will simply overwrite the entire MODFLOW 6 `DIS` package input file with the # following code block. # rewrite mf6 dis input to read from netcdf @@ -389,10 +403,13 @@ def _data_shape(shape): f.write(" botm NETCDF\n") f.write(" idomain NETCDF\n") f.write("END griddata\n") +with open(workspace / "netcdf" / "uzf01.dis", "r") as fh: + print(fh.read()) + # ## Update MODFLOW 6 package input file # -# Follow the same process as above for the NPF package. +# Follow the same process as above for the `NPF` package. # get npf package netcdf info npf = gwf.get_package("npf") @@ -414,10 +431,12 @@ def _data_shape(shape): f.write(" icelltype NETCDF\n") f.write(" k NETCDF\n") f.write("END griddata\n") +with open(workspace / "netcdf" / "uzf01.npf", "r") as fh: + print(fh.read()) # ## Update MODFLOW 6 package input file # -# Follow the same process as above for the GHBG package. The difference is +# Follow the same process as above for the `GHBG` package. The difference is # that this is PERIOD input and therefore stored as timeseries data in the # NetCDF file. 
As NETCDF timeseries and defined in terms of total number of # simulation steps, care must be taken in the translation of FloPy period @@ -443,9 +462,13 @@ def _data_shape(shape): istp = sum(gwf.modeltime.nstp[0:p]) ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] +# ## Display generated dataset + # show the dataset print(ds) +# ## Export generated dataset to NetCDF + # write dataset to netcdf ds.to_netcdf( workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4" @@ -463,8 +486,14 @@ def _data_shape(shape): f.write(" bhead NETCDF\n") f.write(" cond NETCDF\n") f.write("END period 1\n") +with open(workspace / "netcdf" / "uzf01.ghbg", "r") as fh: + print(fh.read()) + +# ## Run MODFLOW 6 simulation with NetCDF input +# +# The simulation generated by this tutorial should be runnable by +# Extended MODFLOW 6, available from the nightly-build repository +# (linked above). -# TODO need extended modflow 6 to run this simulation -# run the netcdf sim # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) From c100cf6b1cdf6703b9201f4693d378414c0b83b1 Mon Sep 17 00:00:00 2001 From: mjreno Date: Tue, 12 Aug 2025 10:48:14 -0400 Subject: [PATCH 07/44] discretization creates dataset --- .docs/Notebooks/netcdf01_tutorial.py | 41 ++++++++--------- flopy/discretization/grid.py | 14 ++++++ flopy/discretization/structuredgrid.py | 63 ++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 22 deletions(-) diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index bd47849b6b..d709f80222 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -62,7 +62,8 @@ # For the purposes of this tutorial, the specifics of this simulation # other than it is a candidate for NetCDF input are not a focus. It # is a NetCDF input candidate because it defines a candidate model type -# (`GWF6`) with packages that support NetCDF input parameters. +# (`GWF6`) with a structured discretization and packages that support +# NetCDF input parameters. # # A NetCDF dataset will be created from array data in the `DIS`, `NPF` and # `GHBG` packages. Data will be copied from the package objects into dataset @@ -274,10 +275,14 @@ def _data_shape(shape): return dataset +# ## Create simulation workspace + # create temporary directories temp_dir = TemporaryDirectory() workspace = Path(temp_dir.name) +# ## Write and run baseline simulation + # run the non-netcdf simulation sim = create_sim(ws=workspace) sim.write_simulation() @@ -300,9 +305,14 @@ def _data_shape(shape): print(fh.read()) # ## Create dataset +# +# Create the base xarray dataset from the modelgrid object. This +# will add required dimensions and coordinate variables to the +# dataset according to the grid specification. Modeltime is needed +# to for timeseries support. 
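+#
+# The dataset's `time` coordinate is built from `modeltime.totim` with CF
+# style attributes (`units = "days since <start_datetime>"`), so the number
+# of time values matches the total step count. A quick check of that count
+# (using only FloPy attributes already used in this tutorial):
+
+# total number of time steps across all stress periods
+print(sum(gwf.modeltime.nstp))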
# create the dataset -ds = xr.Dataset() +ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime) # ## Access model NetCDF attributes # @@ -322,27 +332,15 @@ def _data_shape(shape): for a in nc_info["attrs"]: ds.attrs[a] = nc_info["attrs"][a] -# ## Define dimensions relevant to NetCDF input file +# ## Map dataset dimension names to values # define dimensional info -dis = gwf.modelgrid -xoff = dis.xoffset -yoff = dis.yoffset -x = xoff + dis.xycenters[0] -y = yoff + dis.xycenters[1] -z = [float(x) for x in range(1, dis.nlay + 1)] -nstp = sum(gwf.modeltime.nstp) -time = gwf.modeltime.tslen -nlay = dis.nlay -nrow = dis.nrow -ncol = dis.ncol -dimmap = {"time": nstp, "z": nlay, "y": nrow, "x": ncol} - -# ## Create dataset dimensions - -# create dataset coordinate vars -var_d = {"time": (["time"], time), "z": (["z"], z), "y": (["y"], y), "x": (["x"], x)} -ds = ds.assign(var_d) +dimmap = { + "time": sum(gwf.modeltime.nstp), + "z": gwf.modelgrid.nlay, + "y": gwf.modelgrid.nrow, + "x": gwf.modelgrid.ncol, +} # ## Access package NetCDF attributes # @@ -406,7 +404,6 @@ def _data_shape(shape): with open(workspace / "netcdf" / "uzf01.dis", "r") as fh: print(fh.read()) - # ## Update MODFLOW 6 package input file # # Follow the same process as above for the `NPF` package. diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 1d59801b05..107d58846a 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1288,6 +1288,20 @@ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs) ) return + def dataset(self, mesh=None): + """ + Method to generate baseline Xarray dataset + + Parameters + ---------- + mesh + + Returns + ------- + Xarray dataset + """ + raise NotImplementedError("dataset must be defined in the child class") + # initialize grid from a grb file @classmethod def from_binary_grid_file(cls, file_path, verbose=False): diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index d132485ee2..7a917ebe1e 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1770,6 +1770,69 @@ def get_plottable_layer_array(self, a, layer): assert plotarray.shape == required_shape, msg return plotarray + def dataset(self, modeltime=None, mesh=None): + import xarray as xr + + lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + + ds = xr.Dataset() + + x = self.xoffset + self.xycenters[0] + y = self.yoffset + self.xycenters[1] + z = [float(x) for x in range(1, self.nlay + 1)] + + # set coordinate var bounds + x_bnds = [] + xv = self.xoffset + self.xyedges[0] + for idx, val in enumerate(xv): + if idx + 1 < len(xv): + bnd = [] + bnd.append(xv[idx]) + bnd.append(xv[idx + 1]) + x_bnds.append(bnd) + + y_bnds = [] + yv = self.yoffset + self.xyedges[1] + for idx, val in enumerate(yv): + if idx + 1 < len(yv): + bnd = [] + bnd.append(yv[idx + 1]) + bnd.append(yv[idx]) + y_bnds.append(bnd) + + # create dataset coordinate vars + var_d = { + "time": (["time"], modeltime.totim), + "z": (["z"], z), + "y": (["y"], y), + "x": (["x"], x), + } + ds = ds.assign(var_d) + + # create bound vars + var_d = {"x_bnds": (["x", "bnd"], x_bnds), "y_bnds": (["y", "bnd"], y_bnds)} + ds = ds.assign(var_d) + + ds["time"].attrs["calendar"] = "standard" + ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["axis"] = "T" + ds["time"].attrs["standard_name"] = "time" + ds["time"].attrs["long_name"] = "time" + ds["z"].attrs["units"] = "layer" + ds["z"].attrs["long_name"] = "layer 
number" + ds["y"].attrs["units"] = lenunits[self.lenuni] + ds["y"].attrs["axis"] = "Y" + ds["y"].attrs["standard_name"] = "projection_y_coordinate" + ds["y"].attrs["long_name"] = "Northing" + ds["y"].attrs["bounds"] = "y_bnds" + ds["x"].attrs["units"] = lenunits[self.lenuni] + ds["x"].attrs["axis"] = "X" + ds["x"].attrs["standard_name"] = "projection_x_coordinate" + ds["x"].attrs["long_name"] = "Easting" + ds["x"].attrs["bounds"] = "x_bnds" + + return ds + def _set_structured_iverts(self): """ Build a list of the vertices that define each model cell and the x, y From 34e90bc282767a961b8d8db8683d25d010d6e302 Mon Sep 17 00:00:00 2001 From: mjreno Date: Tue, 12 Aug 2025 13:48:25 -0400 Subject: [PATCH 08/44] add layered mesh tutorial --- .docs/Notebooks/netcdf02_tutorial.py | 497 +++++++++++++++++++++++ flopy/discretization/grid.py | 2 +- flopy/discretization/structuredgrid.py | 197 ++++++--- flopy/discretization/unstructuredgrid.py | 14 + flopy/discretization/vertexgrid.py | 93 +++++ flopy/mf6/mfpackage.py | 33 +- 6 files changed, 775 insertions(+), 61 deletions(-) create mode 100644 .docs/Notebooks/netcdf02_tutorial.py diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py new file mode 100644 index 0000000000..cb93e17174 --- /dev/null +++ b/.docs/Notebooks/netcdf02_tutorial.py @@ -0,0 +1,497 @@ +# --- +# jupyter: +# jupytext: +# cell_metadata_filter: -all +# formats: ipynb,py:light +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.17.2 +# kernelspec: +# display_name: Python 3 (ipykernel) +# language: python +# name: python3 +# metadata: +# section: mf6 +# --- + +# # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim +# +# ## NetCDF tutorial 2: MODFLOW 6 UGRID layered mesh input file +# +# This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file from +# an existing FloPy simulation. In the tutorial, candidate array data is +# added to an xarray dataset and annotated so that the generated NetCDF +# file can be read by MODFLOW 6 as model input. +# +# This tutorial generates a UGRID layered mesh NetCDF variant - for more +# information on supported MODFLOW 6 NetCDF formats see: +# [MODFLOW NetCDF Format](https://github.com/MODFLOW-ORG/modflow6/wiki/MODFLOW-NetCDF-Format). +# +# Note that NetCDF is only supported by the Extended version of MODFLOW 6. +# A nightly windows build of Extended MODFLOW 6 is available from +# [nightly build](https://github.com/MODFLOW-ORG/modflow6-nightly-build). + +# package import +import sys +from pathlib import Path +from pprint import pformat, pprint +from tempfile import TemporaryDirectory + +import numpy as np +import xarray as xr + +import flopy + +print(sys.version) +print(f"flopy version: {flopy.__version__}") + +# ## Define `DNODATA` constant +# +# `DNODATA` is an important constant for MODFLOW 6 timeseries grid input +# data. It signifies that the cell has no data defined for the time step +# in question. These cell values are discarded and have no impact on the +# simulation. + +# DNODATA constant +DNODATA = 3.0e30 + +# ## Define ASCII input baseline simulation +# +# For the purposes of this tutorial, the specifics of this simulation +# other than it is a candidate for NetCDF input are not a focus. It +# is a NetCDF input candidate because it defines a candidate model +# type (`GWF6`) with a vertex discretization and packages that support +# NetCDF input parameters. 
+# +# A NetCDF dataset will be created from array data in the `IC`, and +# 'GHBG' packages. Data will be copied from the package objects into +# dataset arrays. + + +# A FloPy ASCII base simulation that will be updated use netcdf inputs +def create_sim(ws): + name = "uzf02" + nlay = 5 + nrow = 10 + ncol = 10 + ncpl = nrow * ncol + delr = 1.0 + delc = 1.0 + nper = 5 + perlen = [10] * 5 + nstp = [5] * 5 + tsmult = len(perlen) * [1.0] + top = 25.0 + botm = [20.0, 15.0, 10.0, 5.0, 0.0] + strt = 20 + nouter, ninner = 100, 300 + hclose, rclose, relax = 1e-9, 1e-3, 0.97 + + # use flopy util to get disv arguments + disvkwargs = flopy.utils.gridutil.get_disv_kwargs( + nlay, nrow, ncol, delr, delc, top, botm + ) + + # Work up UZF data + iuzno = 0 + cellid = 0 + uzf_pkdat = [] + vks = 10.0 + thtr = 0.05 + thts = 0.30 + thti = 0.15 + eps = 3.5 + + for k in np.arange(nlay): + for i in np.arange(0, ncpl, 1): + if k == 0: + landflg = 1 + surfdp = 0.25 + else: + landflg = 0 + surfdp = 1e-6 + + if k == nlay - 1: + ivertcon = -1 + else: + ivertcon = iuzno + ncpl + + bndnm = "uzf" + f"{int(i + 1):03d}" + uzf_pkdat.append( + # iuzno cellid landflag ivertcn surfdp vks thtr thts thti eps [bndnm] + [ + iuzno, + (k, i), + landflg, + ivertcon, + surfdp, + vks, + thtr, + thts, + thti, + eps, + bndnm, + ] + ) + + iuzno += 1 + + extdp = 14.0 + extwc = 0.055 + pet = 0.001 + zero = 0.0 + uzf_spd = {} + for t in np.arange(0, nper, 1): + spd = [] + iuzno = 0 + for k in np.arange(nlay): + for i in np.arange(0, ncpl, 1): + if k == 0: + if t == 0: + finf = 0.15 + if t == 1: + finf = 0.15 + if t == 2: + finf = 0.15 + if t == 3: + finf = 0.15 + if t == 4: + finf = 0.15 + + spd.append([iuzno, finf, pet, extdp, extwc, zero, zero, zero]) + iuzno += 1 + + uzf_spd.update({t: spd}) + + # Work up the GHB / GHBG boundary + ghb_ids = [(ncol - 1) + i * ncol for i in range(nrow)] + abhead = np.full((nlay, ncpl), DNODATA, dtype=float) + acond = np.full((nlay, ncpl), DNODATA, dtype=float) + cond = 1e4 + for k in np.arange(3, 5, 1): + for i in ghb_ids: + abhead[k, i] = 14.0 + acond[k, i] = cond + + # build MODFLOW 6 files + sim = flopy.mf6.MFSimulation( + sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws + ) + + # time discretization + tdis_rc = [] + for i in range(nper): + tdis_rc.append((perlen[i], nstp[i], tsmult[i])) + + # create tdis package + tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=name, newtonoptions="NEWTON", save_flows=True + ) + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + print_option="SUMMARY", + complexity="MODERATE", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="DBD", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv(gwf, **disvkwargs) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, strt=strt) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=True, icelltype=1, k=0.1, k33=1) + + # aquifer storage + sto = flopy.mf6.ModflowGwfsto(gwf, iconvert=1, ss=1e-5, sy=0.2, transient=True) + + # general-head boundary + ghb = flopy.mf6.ModflowGwfghbg(gwf, print_flows=True, bhead=abhead, cond=acond) + + # unsaturated-zone flow + etobs = [] + i = 4 + # Seems as though these are 
1-based and not 0-based, like the rest of flopy + for j in list(np.arange(40, 50, 1)) + list(np.arange(140, 150, 1)): + etobs.append(("uzet_" + str(j + 1), "uzet", (j,))) + etobs.append(("uzf-gwet_" + str(j + 1), "uzf-gwet", (j,))) + + uzf_obs = {f"{name}.uzfobs": etobs} + + uzf = flopy.mf6.ModflowGwfuzf( + gwf, + print_flows=True, + save_flows=True, + simulate_et=True, + simulate_gwseep=True, + linear_gwet=True, + observations=uzf_obs, + boundnames=True, + ntrailwaves=15, + nwavesets=40, + nuzfcells=len(uzf_pkdat), + packagedata=uzf_pkdat, + perioddata=uzf_spd, + budget_filerecord=f"{name}.uzf.bud", + ) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=f"{name}.cbc", + head_filerecord=f"{name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + filename=f"{name}.oc", + ) + + # Print human-readable heads + obs_lst = [] + for k in np.arange(0, 1, 1): + for i in np.arange(40, 50, 1): + obs_lst.append(["obs_" + str(i + 1), "head", (k, i)]) + + obs_dict = {f"{name}.obs.csv": obs_lst} + obs = flopy.mf6.ModflowUtlobs(gwf, pname="head_obs", digits=20, continuous=obs_dict) + + return sim + + +# ## Create helper function to update dataset +# +# This function updates an Xarray dataset to add variables described +# in a FloPy provided dictionary. +# +# A dimension map variable relates FloPy and NetCDF dimensions names. + + +# A subroutine that can update an xarray dataset with package +# netcdf information stored in a dict +def add_netcdf_vars(dataset, nc_info, dimmap): + def _data_shape(shape): + dims_l = [] + for d in shape: + dims_l.append(dimmap[d]) + + return dims_l + + for v in nc_info: + varname = nc_info[v]["varname"] + layered = varname.split("/") + if len(layered) > 1: + l = layered[1][6] + varname = f"{layered[0]}_l{l}" + data = np.full( + _data_shape(nc_info[v]["netcdf_shape"]), + nc_info[v]["attrs"]["_FillValue"], + dtype=nc_info[v]["xarray_type"], + ) + var_d = {varname: (nc_info[v]["netcdf_shape"], data)} + dataset = dataset.assign(var_d) + for a in nc_info[v]["attrs"]: + dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + + return dataset + + +# ## Create simulation workspace + +# create temporary directories +temp_dir = TemporaryDirectory() +workspace = Path(temp_dir.name) + +# ## Write and run baseline simulation + +# run the non-netcdf simulation +sim = create_sim(ws=workspace) +sim.write_simulation() +success, buff = sim.run_simulation(silent=True, report=True) +assert success, pformat(buff) + +# ## Create NetCDF based simulation +# +# Reset the simulation path and set the GWF name file `nc_filerecord` +# attribute to the name of the intended input NetCDF file. Display +# the resultant name file changes. + +# create directory for netcdf sim +# set model name file nc_filerecord attribute to export name +sim.set_sim_path(workspace / "netcdf") +gwf = sim.get_model("uzf02") +gwf.name_file.nc_filerecord = "uzf02.layered.nc" +sim.write_simulation() +with open(workspace / "netcdf" / "uzf02.nam", "r") as fh: + print(fh.read()) + +# ## Create dataset +# +# Create the base xarray dataset from the modelgrid object. This +# will add required dimensions and coordinate variables to the +# dataset according to the grid specification. Modeltime is needed +# to for timeseries support. 
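+#
+# For the layered mesh variant the horizontal dimensions collapse to a
+# single `nmesh_face` dimension of length `ncpl`, and layered arrays become
+# one variable per layer (the `_l<layer>` suffix assembled by the helper
+# above). A quick look at the sizes involved:
+
+# faces per layer and layer count used by the layered mesh dataset
+print(gwf.modelgrid.ncpl, gwf.modelgrid.nlay)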
+ +# create the dataset +ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime, mesh="layered") + +# ## Access model NetCDF attributes +# +# Access model scoped NetCDF details by storing the dictionary +# returned from `netcdf_info()`. In particular, we need to set dataset +# scoped attributes that are stored in the model netcdf info dict. +# +# First, retrieve and store the netcdf info dictionary and display +# its contents. Then, in the following step, update the dataset with +# the model scoped attributes defined in the dictionary. + +# get model netcdf info +nc_info = gwf.netcdf_info(mesh="layered") +pprint(nc_info) + +# update dataset with required attributes +for a in nc_info["attrs"]: + ds.attrs[a] = nc_info["attrs"][a] + +# ## Map dataset dimension names to values + +# define dimensional info +dimmap = { + "time": sum(gwf.modeltime.nstp), + "z": gwf.modelgrid.nlay, + "nmesh_face": gwf.modelgrid.ncpl, +} + +# ## Access package NetCDF attributes +# +# Access package scoped NetCDF details by storing the dictionary returned +# from `netcdf_info()`. We need to set package variable attributes that are +# stored in the package netcdf info dict, but we also need other information +# that is relevant to creating the variables themselves. +# +# The contents of the info dictionary are shown and then, in the following +# step, the dictionary and the dataset are passed to a helper routine that +# create the intended array variables. + +# get ic package netcdf info +ic = gwf.get_package("ic") +nc_info = ic.netcdf_info(mesh="layered") +pprint(nc_info) + +# create ic dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# ## Update array data +# +# We have created dataset array variables for the package but they do not yet +# define the expected input data for MODFLOW 6. We will take advantage of the +# existing simulation objects and update the dataset. + +# update dataset from ic strt array +for l in range(gwf.modelgrid.nlay): + ds[f"ic_strt_l{l + 1}"].values = ic.strt.get_data()[l].flatten() + +# ## Update MODFLOW 6 package input file +# +# MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF +# file is generated, we need to configure MODFLOW 6 so that it looks to that +# file for the package array input. The ASCII will no longer defined the arrays- +# instead the array names will be followed by the NETCDF keyword. +# +# We will simply overwrite the entire MODFLOW 6 `IC` package input file with the +# following code block. + +# rewrite mf6 ic input to read from netcdf +with open(workspace / "netcdf" / "uzf02.ic", "w") as f: + f.write("BEGIN options\n") + f.write("END options\n\n") + f.write("BEGIN griddata\n") + f.write(" strt NETCDF\n") + f.write("END griddata\n") +with open(workspace / "netcdf" / "uzf02.ic", "r") as fh: + print(fh.read()) + +# ## Update MODFLOW 6 package input file +# +# Follow the same process as above for the `GHBG` package. The difference is +# that this is PERIOD input and therefore stored as timeseries data in the +# NetCDF file. As NETCDF timeseries and defined in terms of total number of +# simulation steps, care must be taken in the translation of FloPy period +# data to the timeseries. 
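+#
+# Concretely: this simulation uses `nstp = [5] * 5`, so the first time step
+# of stress periods 0 through 4 maps to timeseries indices 0, 5, 10, 15 and
+# 20 - the same mapping the `istp` computation below produces:
+
+# first timeseries index of each stress period
+print([sum(gwf.modeltime.nstp[0:p]) for p in range(len(gwf.modeltime.nstp))])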
+ +# get ghbg package netcdf info +ghbg = gwf.get_package("ghbg_0") +nc_info = ghbg.netcdf_info(mesh="layered") +pprint(nc_info) + +# create ghbg dataset variables +ds = add_netcdf_vars(ds, nc_info, dimmap) + +# update bhead netcdf array from flopy perioddata +# timeseries step index is first of stress period +for p in ghbg.bhead.get_data(): + if ghbg.bhead.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + for l in range(gwf.modelgrid.nlay): + ds[f"ghbg_0_bhead_l{l + 1}"].values[istp] = ghbg.bhead.get_data()[p][ + l + ].flatten() + +# update cond netcdf array from flopy perioddata +# timeseries step index is first of stress period +for p in ghbg.cond.get_data(): + if ghbg.cond.get_data()[p] is not None: + istp = sum(gwf.modeltime.nstp[0:p]) + for l in range(gwf.modelgrid.nlay): + ds[f"ghbg_0_cond_l{l + 1}"].values[istp] = ghbg.cond.get_data()[p][ + l + ].flatten() + +# ## Display generated dataset + +# show the dataset +print(ds) + +# ## Export generated dataset to NetCDF + +# write dataset to netcdf +ds.to_netcdf(workspace / "netcdf/uzf02.layered.nc", format="NETCDF4", engine="netcdf4") + +# rewrite mf6 ghbg input to read from netcdf +with open(workspace / "netcdf/uzf02.ghbg", "w") as f: + f.write("BEGIN options\n") + f.write(" READARRAYGRID\n") + f.write(" PRINT_FLOWS\n") + f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" bhead NETCDF\n") + f.write(" cond NETCDF\n") + f.write("END period 1\n") +with open(workspace / "netcdf" / "uzf02.ghbg", "r") as fh: + print(fh.read()) + +# ## Run MODFLOW 6 simulation with NetCDF input +# +# The simulation generated by this tutorial should be runnable by +# Extended MODFLOW 6, available from the nightly-build repository +# (linked above). + +# success, buff = sim.run_simulation(silent=True, report=True) +# assert success, pformat(buff) diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 107d58846a..e4416ba377 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1288,7 +1288,7 @@ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs) ) return - def dataset(self, mesh=None): + def dataset(self, modeltime=None, mesh=None): """ Method to generate baseline Xarray dataset diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 7a917ebe1e..f2c39cdf5c 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1773,63 +1773,154 @@ def get_plottable_layer_array(self, a, layer): def dataset(self, modeltime=None, mesh=None): import xarray as xr + FILLNA_INT32 = np.int32(-2147483647) + FILLNA_DBL = 9.96920996838687e36 lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} ds = xr.Dataset() - x = self.xoffset + self.xycenters[0] - y = self.yoffset + self.xycenters[1] - z = [float(x) for x in range(1, self.nlay + 1)] - - # set coordinate var bounds - x_bnds = [] - xv = self.xoffset + self.xyedges[0] - for idx, val in enumerate(xv): - if idx + 1 < len(xv): - bnd = [] - bnd.append(xv[idx]) - bnd.append(xv[idx + 1]) - x_bnds.append(bnd) - - y_bnds = [] - yv = self.yoffset + self.xyedges[1] - for idx, val in enumerate(yv): - if idx + 1 < len(yv): - bnd = [] - bnd.append(yv[idx + 1]) - bnd.append(yv[idx]) - y_bnds.append(bnd) - - # create dataset coordinate vars - var_d = { - "time": (["time"], modeltime.totim), - "z": (["z"], z), - "y": (["y"], y), - "x": (["x"], x), - } - ds = ds.assign(var_d) - - # create bound vars - var_d = {"x_bnds": (["x", "bnd"], x_bnds), "y_bnds": (["y", 
"bnd"], y_bnds)} - ds = ds.assign(var_d) - - ds["time"].attrs["calendar"] = "standard" - ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" - ds["time"].attrs["axis"] = "T" - ds["time"].attrs["standard_name"] = "time" - ds["time"].attrs["long_name"] = "time" - ds["z"].attrs["units"] = "layer" - ds["z"].attrs["long_name"] = "layer number" - ds["y"].attrs["units"] = lenunits[self.lenuni] - ds["y"].attrs["axis"] = "Y" - ds["y"].attrs["standard_name"] = "projection_y_coordinate" - ds["y"].attrs["long_name"] = "Northing" - ds["y"].attrs["bounds"] = "y_bnds" - ds["x"].attrs["units"] = lenunits[self.lenuni] - ds["x"].attrs["axis"] = "X" - ds["x"].attrs["standard_name"] = "projection_x_coordinate" - ds["x"].attrs["long_name"] = "Easting" - ds["x"].attrs["bounds"] = "x_bnds" + if mesh and mesh.upper() == "LAYERED": + # mesh container variable + ds = ds.assign({"mesh": ([], np.int32(1))}) + ds["mesh"].attrs["cf_role"] = "mesh_topology" + ds["mesh"].attrs["long_name"] = "2D mesh topology" + ds["mesh"].attrs["topology_dimension"] = np.int32(2) + ds["mesh"].attrs["face_dimension"] = "nmesh_face" + ds["mesh"].attrs["node_coordinates"] = "mesh_node_x mesh_node_y" + ds["mesh"].attrs["face_coordinates"] = "mesh_face_x mesh_face_y" + ds["mesh"].attrs["face_node_connectivity"] = "mesh_face_nodes" + + # mesh node x and y + var_d = { + "mesh_node_x": (["nmesh_node"], self.verts[:, 0]), + "mesh_node_y": (["nmesh_node"], self.verts[:, 1]), + } + ds = ds.assign(var_d) + ds["mesh_node_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_node_x"].attrs["long_name"] = "Easting" + ds["mesh_node_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_node_y"].attrs["long_name"] = "Northing" + + # mesh face x and y + x_bnds = [] + x_verts = self.verts[:, 0].reshape(self.nrow + 1, self.ncol + 1) + for i in range(self.nrow): + if i + 1 > self.nrow: + break + for j in range(self.ncol): + if j + 1 <= self.ncol: + bnd = [] + bnd.append(x_verts[i + 1][j]) + bnd.append(x_verts[i + 1][j + 1]) + bnd.append(x_verts[i][j + 1]) + bnd.append(x_verts[i][j]) + x_bnds.append(bnd) + + y_bnds = [] + y_verts = self.verts[:, 1].reshape(self.nrow + 1, self.ncol + 1) + for i in range(self.nrow): + if i + 1 > self.nrow: + break + for j in range(self.ncol): + if j + 1 <= self.ncol: + bnd = [] + bnd.append(y_verts[i + 1][j]) + bnd.append(y_verts[i + 1][j + 1]) + bnd.append(y_verts[i][j + 1]) + bnd.append(y_verts[i][j]) + y_bnds.append(bnd) + + var_d = { + "mesh_face_x": (["nmesh_face"], self.xcellcenters.flatten()), + "mesh_face_xbnds": (["nmesh_face", "max_nmesh_face_nodes"], x_bnds), + "mesh_face_y": (["nmesh_face"], self.ycellcenters.flatten()), + "mesh_face_ybnds": (["nmesh_face", "max_nmesh_face_nodes"], y_bnds), + } + ds = ds.assign(var_d) + ds["mesh_face_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_face_x"].attrs["long_name"] = "Easting" + ds["mesh_face_x"].attrs["bounds"] = "mesh_face_xbnds" + ds["mesh_face_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_face_y"].attrs["long_name"] = "Northing" + ds["mesh_face_y"].attrs["bounds"] = "mesh_face_ybnds" + + # mesh face nodes + max_face_nodes = 4 + face_nodes = [] + for r in self.iverts: + nodes = [np.int32(x + 1) for x in r] + nodes.reverse() + face_nodes.append(nodes) + + var_d 
= { + "mesh_face_nodes": (["nmesh_face", "max_nmesh_face_nodes"], face_nodes), + } + ds = ds.assign(var_d) + ds["mesh_face_nodes"].attrs["cf_role"] = "face_node_connectivity" + ds["mesh_face_nodes"].attrs["long_name"] = ( + "Vertices bounding cell (counterclockwise)" + ) + ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 + ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + + elif mesh is None: + x = self.xoffset + self.xycenters[0] + y = self.yoffset + self.xycenters[1] + z = [float(x) for x in range(1, self.nlay + 1)] + + # set coordinate var bounds + x_bnds = [] + xv = self.xoffset + self.xyedges[0] + for idx, val in enumerate(xv): + if idx + 1 < len(xv): + bnd = [] + bnd.append(xv[idx]) + bnd.append(xv[idx + 1]) + x_bnds.append(bnd) + + y_bnds = [] + yv = self.yoffset + self.xyedges[1] + for idx, val in enumerate(yv): + if idx + 1 < len(yv): + bnd = [] + bnd.append(yv[idx + 1]) + bnd.append(yv[idx]) + y_bnds.append(bnd) + + # create dataset coordinate vars + var_d = { + "time": (["time"], modeltime.totim), + "z": (["z"], z), + "y": (["y"], y), + "x": (["x"], x), + } + ds = ds.assign(var_d) + + # create bound vars + var_d = {"x_bnds": (["x", "bnd"], x_bnds), "y_bnds": (["y", "bnd"], y_bnds)} + ds = ds.assign(var_d) + + ds["time"].attrs["calendar"] = "standard" + ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["axis"] = "T" + ds["time"].attrs["standard_name"] = "time" + ds["time"].attrs["long_name"] = "time" + ds["z"].attrs["units"] = "layer" + ds["z"].attrs["long_name"] = "layer number" + ds["y"].attrs["units"] = lenunits[self.lenuni] + ds["y"].attrs["axis"] = "Y" + ds["y"].attrs["standard_name"] = "projection_y_coordinate" + ds["y"].attrs["long_name"] = "Northing" + ds["y"].attrs["bounds"] = "y_bnds" + ds["x"].attrs["units"] = lenunits[self.lenuni] + ds["x"].attrs["axis"] = "X" + ds["x"].attrs["standard_name"] = "projection_x_coordinate" + ds["x"].attrs["long_name"] = "Easting" + ds["x"].attrs["bounds"] = "x_bnds" return ds diff --git a/flopy/discretization/unstructuredgrid.py b/flopy/discretization/unstructuredgrid.py index f6e588e656..fd403f6383 100644 --- a/flopy/discretization/unstructuredgrid.py +++ b/flopy/discretization/unstructuredgrid.py @@ -957,6 +957,20 @@ def get_plottable_layer_shape(self, layer=None): shp = (self.ncpl[layer],) return shp + def dataset(self, modeltime=None, mesh=None): + """ + Method to generate baseline Xarray dataset + + Parameters + ---------- + mesh + + Returns + ------- + Xarray dataset + """ + raise NotImplementedError("NetCDF currently unsupported for Unstructured grids") + @staticmethod def ncpl_from_ihc(ihc, iac): """ diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index ebda12c87c..4242b1c5f1 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -600,6 +600,99 @@ def get_plottable_layer_array(self, a, layer): assert plotarray.shape == required_shape, msg return plotarray + def dataset(self, modeltime=None, mesh=None): + import xarray as xr + + FILLNA_INT32 = np.int32(-2147483647) + FILLNA_DBL = 9.96920996838687e36 + lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + + ds = xr.Dataset() + + # mesh container variable + ds = ds.assign({"mesh": ([], np.int32(1))}) + ds["mesh"].attrs["cf_role"] = "mesh_topology" + ds["mesh"].attrs["long_name"] = "2D mesh topology" + ds["mesh"].attrs["topology_dimension"] = np.int32(2) + ds["mesh"].attrs["face_dimension"] = "nmesh_face" + ds["mesh"].attrs["node_coordinates"] = "mesh_node_x 
mesh_node_y" + ds["mesh"].attrs["face_coordinates"] = "mesh_face_x mesh_face_y" + ds["mesh"].attrs["face_node_connectivity"] = "mesh_face_nodes" + + # mesh node x and y + var_d = { + "mesh_node_x": (["nmesh_node"], self.verts[:, 0]), + "mesh_node_y": (["nmesh_node"], self.verts[:, 1]), + } + ds = ds.assign(var_d) + ds["mesh_node_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_node_x"].attrs["long_name"] = "Easting" + ds["mesh_node_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_node_y"].attrs["long_name"] = "Northing" + + # determine max number of cell vertices + cell_nverts = [cell2d[3] for cell2d in self.cell2d] + max_face_nodes = max(cell_nverts) + + # mesh face x and y + x_bnds = [] + for x in self.xvertices: + x = x[::-1] + if len(x) < max_face_nodes: + # TODO: set fill value? + x.extend([FILLNA_INT32] * (max_face_nodes - len(x))) + x_bnds.append(x) + + y_bnds = [] + for y in self.yvertices: + y = y[::-1] + if len(y) < max_face_nodes: + # TODO: set fill value? + y.extend([FILLNA_INT32] * (max_face_nodes - len(y))) + y_bnds.append(y) + + var_d = { + "mesh_face_x": (["nmesh_face"], self.xcellcenters), + "mesh_face_xbnds": (["nmesh_face", "max_nmesh_face_nodes"], x_bnds), + "mesh_face_y": (["nmesh_face"], self.ycellcenters), + "mesh_face_ybnds": (["nmesh_face", "max_nmesh_face_nodes"], y_bnds), + } + ds = ds.assign(var_d) + ds["mesh_face_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_face_x"].attrs["long_name"] = "Easting" + ds["mesh_face_x"].attrs["bounds"] = "mesh_face_xbnds" + ds["mesh_face_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_face_y"].attrs["long_name"] = "Northing" + ds["mesh_face_y"].attrs["bounds"] = "mesh_face_ybnds" + + # mesh face nodes + face_nodes = [] + for idx, r in enumerate(self.cell2d): + nodes = self.cell2d[idx][4 : 4 + r[3]] + nodes = [np.int32(x + 1) for x in nodes] + nodes.reverse() + if len(nodes) < max_face_nodes: + # TODO set fill value? 
+ nodes.extend([FILLNA_INT32] * (max_face_nodes - len(nodes))) + face_nodes.append(nodes) + + var_d = { + "mesh_face_nodes": (["nmesh_face", "max_nmesh_face_nodes"], face_nodes), + } + ds = ds.assign(var_d) + ds["mesh_face_nodes"].attrs["cf_role"] = "face_node_connectivity" + ds["mesh_face_nodes"].attrs["long_name"] = ( + "Vertices bounding cell (counterclockwise)" + ) + ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 + ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + + return ds + # initialize grid from a grb file @classmethod def from_binary_grid_file(cls, file_path, verbose=False): diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index d559ee1d17..746b4a3ddd 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3463,15 +3463,34 @@ def _add_entry(tagname, iaux=None, layer=None): dims = [] if data_item.shape[0] == 'nodes': if data_item.block_name == "griddata": - dims += ["x", "y", "z"] + if mesh is None: + dims += ["x", "y", "z"] + elif mesh.upper() == "LAYERED": + dims += ["nmesh_face"] elif data_item.block_name == "period": - dims += ["x", "y", "z", "time"] + if mesh is None: + dims += ["x", "y", "z", "time"] + elif mesh.upper() == "LAYERED": + dims += ["nmesh_face", "time"] else: - dimmap = {"nlay": "z", "nrow": "y", "ncol": "x"} - for s in data_item.shape: - for k, v in dimmap.items(): - s = s.replace(k, v) - dims.append(s) + if mesh is None: + dimmap = {"nlay": "z", "nrow": "y", "ncol": "x"} + for s in data_item.shape: + for k, v in dimmap.items(): + s = s.replace(k, v) + dims.append(s) + elif mesh.upper() == "LAYERED": + if ( + len(data_item.shape) == 3 or + len(data_item.shape) == 2 or + data_item.shape[0] == 'ncpl' + ): + dims.append("nmesh_face") + elif data_item.shape[0] == 'ncol': + dims.append("x") + elif data_item.shape[0] == 'nrow': + dims.append("y") + a["netcdf_shape"] = dims[::-1] # add variable attributes dictionary From d57d282969a03b3e7830cb5e7ffc94b8f8926836 Mon Sep 17 00:00:00 2001 From: mjreno Date: Tue, 12 Aug 2025 16:46:06 -0400 Subject: [PATCH 09/44] some cleanup --- .docs/Notebooks/netcdf01_tutorial.py | 14 +- .docs/Notebooks/netcdf02_tutorial.py | 16 +- flopy/discretization/structuredgrid.py | 287 +- flopy/discretization/vertexgrid.py | 7 + flopy/mf6/tmp/2/mfpackage.py | 3801 ++++++++++++++++++++++++ flopy/mf6/tmp/mfmodel.py | 2229 ++++++++++++++ flopy/mf6/tmp/mfpackage.py | 3666 +++++++++++++++++++++++ flopy/mf6/tmp/mfstructure.py | 2113 +++++++++++++ flopy/mf6/tmp/ruff/2/mfmodel.py | 2256 ++++++++++++++ flopy/mf6/tmp/ruff/2/mfpackage.py | 3720 +++++++++++++++++++++++ flopy/mf6/tmp/ruff/mfmodel.py | 2143 +++++++++++++ flopy/mf6/tmp/ruff/mfpackage.py | 3497 ++++++++++++++++++++++ 12 files changed, 23595 insertions(+), 154 deletions(-) create mode 100644 flopy/mf6/tmp/2/mfpackage.py create mode 100644 flopy/mf6/tmp/mfmodel.py create mode 100644 flopy/mf6/tmp/mfpackage.py create mode 100644 flopy/mf6/tmp/mfstructure.py create mode 100644 flopy/mf6/tmp/ruff/2/mfmodel.py create mode 100644 flopy/mf6/tmp/ruff/2/mfpackage.py create mode 100644 flopy/mf6/tmp/ruff/mfmodel.py create mode 100644 flopy/mf6/tmp/ruff/mfpackage.py diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index d709f80222..cd138fd3f3 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -244,10 +244,10 @@ def create_sim(ws): # ## Create helper function to update dataset # -# This function updates an Xarray dataset to add variables described +# This function updates an xarray 
dataset to add variables described
 # in a FloPy provided dictionary.
 #
-# A dimension map variable relates FloPy and NetCDF dimensions names.
+# The dimmap variable maps each NetCDF dimension name to its length.
 
 
 # A subroutine that can update an xarray dataset with package
@@ -291,7 +291,7 @@ def _data_shape(shape):
 
 # ## Create NetCDF based simulation
 #
-# Reset the simulation path and set the GWF name file `nc_filerecord`
+# Reset the simulation path and set the `GWF` name file `nc_filerecord`
 # attribute to the name of the intended input NetCDF file. Display
 # the resultant name file changes.
 
@@ -309,7 +309,7 @@ def _data_shape(shape):
 # Create the base xarray dataset from the modelgrid object. This
 # will add required dimensions and coordinate variables to the
 # dataset according to the grid specification. Modeltime is needed
-# to for timeseries support.
+# for timeseries support.
 
 # create the dataset
 ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
@@ -378,8 +378,8 @@ def _data_shape(shape):
 #
 # MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF
 # file is generated, we need to configure MODFLOW 6 so that it looks to that
-# file for the package array input. The ASCII will no longer defined the arrays-
-# instead the array names will be followed by the NETCDF keyword.
+# file for the package array input. The ASCII file will no longer define the
+# arrays - instead the array names will be followed by the NETCDF keyword.
 #
 # We will simply overwrite the entire MODFLOW 6 `DIS` package input file with the
 # following code block.
@@ -435,7 +435,7 @@ def _data_shape(shape):
 #
 # Follow the same process as above for the `GHBG` package. The difference is
 # that this is PERIOD input and therefore stored as timeseries data in the
-# NetCDF file. As NETCDF timeseries and defined in terms of total number of
+# NetCDF file. As NETCDF timeseries are defined in terms of the total number of
 # simulation steps, care must be taken in the translation of FloPy period
 # data to the timeseries.
 
diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py
index cb93e17174..68caae2892 100644
--- a/.docs/Notebooks/netcdf02_tutorial.py
+++ b/.docs/Notebooks/netcdf02_tutorial.py
@@ -66,7 +66,7 @@
 # NetCDF input parameters.
 #
 # A NetCDF dataset will be created from array data in the `IC`, and
-# 'GHBG' packages. Data will be copied from the package objects into
+# `GHBG` packages. Data will be copied from the package objects into
 # dataset arrays.
 
 
@@ -278,10 +278,10 @@ def create_sim(ws):
 
 # ## Create helper function to update dataset
 #
-# This function updates an Xarray dataset to add variables described
+# This function updates an xarray dataset to add variables described
 # in a FloPy provided dictionary.
 #
-# A dimension map variable relates FloPy and NetCDF dimensions names.
+# The dimmap variable maps each NetCDF dimension name to its length.
 
 
 # A subroutine that can update an xarray dataset with package
@@ -329,7 +329,7 @@ def _data_shape(shape):
 
 # ## Create NetCDF based simulation
 #
-# Reset the simulation path and set the GWF name file `nc_filerecord`
+# Reset the simulation path and set the `GWF` name file `nc_filerecord`
 # attribute to the name of the intended input NetCDF file. Display
 # the resultant name file changes.
 
@@ -347,7 +347,7 @@ def create_sim(ws):
 # Create the base xarray dataset from the modelgrid object. This
 # will add required dimensions and coordinate variables to the
 # dataset according to the grid specification. 
Modeltime is needed
-# to for timeseries support.
+# for timeseries support.
 
 # create the dataset
 ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime, mesh="layered")
@@ -412,8 +412,8 @@ def _data_shape(shape):
 #
 # MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF
 # file is generated, we need to configure MODFLOW 6 so that it looks to that
-# file for the package array input. The ASCII will no longer defined the arrays-
-# instead the array names will be followed by the NETCDF keyword.
+# file for the package array input. The ASCII file will no longer define the
+# arrays - instead the array names will be followed by the NETCDF keyword.
 #
 # We will simply overwrite the entire MODFLOW 6 `IC` package input file with the
 # following code block.
@@ -432,7 +432,7 @@ def _data_shape(shape):
 #
 # Follow the same process as above for the `GHBG` package. The difference is
 # that this is PERIOD input and therefore stored as timeseries data in the
-# NetCDF file. As NETCDF timeseries and defined in terms of total number of
+# NetCDF file. As NETCDF timeseries are defined in terms of the total number of
 # simulation steps, care must be taken in the translation of FloPy period
 # data to the timeseries.
 
diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py
index f2c39cdf5c..787dc965cd 100644
--- a/flopy/discretization/structuredgrid.py
+++ b/flopy/discretization/structuredgrid.py
@@ -1773,156 +1773,167 @@ def get_plottable_layer_array(self, a, layer):
     def dataset(self, modeltime=None, mesh=None):
         import xarray as xr
 
-        FILLNA_INT32 = np.int32(-2147483647)
-        FILLNA_DBL = 9.96920996838687e36
-        lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"}
+        if modeltime is None:
+            raise ValueError("modeltime required for dataset timeseries")
 
         ds = xr.Dataset()
+        ds.attrs["modflow_grid"] = "STRUCTURED"
 
         if mesh and mesh.upper() == "LAYERED":
-            # mesh container variable
-            ds = ds.assign({"mesh": ([], np.int32(1))})
-            ds["mesh"].attrs["cf_role"] = "mesh_topology"
-            ds["mesh"].attrs["long_name"] = "2D mesh topology"
-            ds["mesh"].attrs["topology_dimension"] = np.int32(2)
-            ds["mesh"].attrs["face_dimension"] = "nmesh_face"
-            ds["mesh"].attrs["node_coordinates"] = "mesh_node_x mesh_node_y"
-            ds["mesh"].attrs["face_coordinates"] = "mesh_face_x mesh_face_y"
-            ds["mesh"].attrs["face_node_connectivity"] = "mesh_face_nodes"
-
-            # mesh node x and y
-            var_d = {
-                "mesh_node_x": (["nmesh_node"], self.verts[:, 0]),
-                "mesh_node_y": (["nmesh_node"], self.verts[:, 1]),
-            }
-            ds = ds.assign(var_d)
-            ds["mesh_node_x"].attrs["units"] = lenunits[self.lenuni]
-            ds["mesh_node_x"].attrs["standard_name"] = "projection_x_coordinate"
-            ds["mesh_node_x"].attrs["long_name"] = "Easting"
-            ds["mesh_node_y"].attrs["units"] = lenunits[self.lenuni]
-            ds["mesh_node_y"].attrs["standard_name"] = "projection_y_coordinate"
-            ds["mesh_node_y"].attrs["long_name"] = "Northing"
-
-            # mesh face x and y
-            x_bnds = []
-            x_verts = self.verts[:, 0].reshape(self.nrow + 1, self.ncol + 1)
-            for i in range(self.nrow):
-                if i + 1 > self.nrow:
-                    break
-                for j in range(self.ncol):
-                    if j + 1 <= self.ncol:
-                        bnd = []
-                        bnd.append(x_verts[i + 1][j])
-                        bnd.append(x_verts[i + 1][j + 1])
-                        bnd.append(x_verts[i][j + 1])
-                        bnd.append(x_verts[i][j])
-                        x_bnds.append(bnd)
-
-            y_bnds = []
-            y_verts = self.verts[:, 1].reshape(self.nrow + 1, self.ncol + 1)
-            for i in range(self.nrow):
-                if i + 1 > self.nrow:
-                    break
-                for j in range(self.ncol):
-                    if j + 1 <= self.ncol:
-                        bnd = []
-                        bnd.append(y_verts[i + 1][j])
-                        bnd.append(y_verts[i + 
1][j + 1]) - bnd.append(y_verts[i][j + 1]) - bnd.append(y_verts[i][j]) - y_bnds.append(bnd) - - var_d = { - "mesh_face_x": (["nmesh_face"], self.xcellcenters.flatten()), - "mesh_face_xbnds": (["nmesh_face", "max_nmesh_face_nodes"], x_bnds), - "mesh_face_y": (["nmesh_face"], self.ycellcenters.flatten()), - "mesh_face_ybnds": (["nmesh_face", "max_nmesh_face_nodes"], y_bnds), - } - ds = ds.assign(var_d) - ds["mesh_face_x"].attrs["units"] = lenunits[self.lenuni] - ds["mesh_face_x"].attrs["standard_name"] = "projection_x_coordinate" - ds["mesh_face_x"].attrs["long_name"] = "Easting" - ds["mesh_face_x"].attrs["bounds"] = "mesh_face_xbnds" - ds["mesh_face_y"].attrs["units"] = lenunits[self.lenuni] - ds["mesh_face_y"].attrs["standard_name"] = "projection_y_coordinate" - ds["mesh_face_y"].attrs["long_name"] = "Northing" - ds["mesh_face_y"].attrs["bounds"] = "mesh_face_ybnds" - - # mesh face nodes - max_face_nodes = 4 - face_nodes = [] - for r in self.iverts: - nodes = [np.int32(x + 1) for x in r] - nodes.reverse() - face_nodes.append(nodes) - - var_d = { - "mesh_face_nodes": (["nmesh_face", "max_nmesh_face_nodes"], face_nodes), - } - ds = ds.assign(var_d) - ds["mesh_face_nodes"].attrs["cf_role"] = "face_node_connectivity" - ds["mesh_face_nodes"].attrs["long_name"] = ( - "Vertices bounding cell (counterclockwise)" - ) - ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 - ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) - + return self._layered_mesh_dataset(ds, modeltime) elif mesh is None: - x = self.xoffset + self.xycenters[0] - y = self.yoffset + self.xycenters[1] - z = [float(x) for x in range(1, self.nlay + 1)] - - # set coordinate var bounds - x_bnds = [] - xv = self.xoffset + self.xyedges[0] - for idx, val in enumerate(xv): - if idx + 1 < len(xv): + return self._structured_dataset(ds, modeltime) + + def _layered_mesh_dataset(self, ds, modeltime=None): + FILLNA_INT32 = np.int32(-2147483647) + FILLNA_DBL = 9.96920996838687e36 + lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + + # mesh container variable + ds = ds.assign({"mesh": ([], np.int32(1))}) + ds["mesh"].attrs["cf_role"] = "mesh_topology" + ds["mesh"].attrs["long_name"] = "2D mesh topology" + ds["mesh"].attrs["topology_dimension"] = np.int32(2) + ds["mesh"].attrs["face_dimension"] = "nmesh_face" + ds["mesh"].attrs["node_coordinates"] = "mesh_node_x mesh_node_y" + ds["mesh"].attrs["face_coordinates"] = "mesh_face_x mesh_face_y" + ds["mesh"].attrs["face_node_connectivity"] = "mesh_face_nodes" + + # mesh node x and y + var_d = { + "mesh_node_x": (["nmesh_node"], self.verts[:, 0]), + "mesh_node_y": (["nmesh_node"], self.verts[:, 1]), + } + ds = ds.assign(var_d) + ds["mesh_node_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_node_x"].attrs["long_name"] = "Easting" + ds["mesh_node_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_node_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_node_y"].attrs["long_name"] = "Northing" + + # mesh face x and y + x_bnds = [] + x_verts = self.verts[:, 0].reshape(self.nrow + 1, self.ncol + 1) + for i in range(self.nrow): + if i + 1 > self.nrow: + break + for j in range(self.ncol): + if j + 1 <= self.ncol: bnd = [] - bnd.append(xv[idx]) - bnd.append(xv[idx + 1]) + bnd.append(x_verts[i + 1][j]) + bnd.append(x_verts[i + 1][j + 1]) + bnd.append(x_verts[i][j + 1]) + bnd.append(x_verts[i][j]) x_bnds.append(bnd) - y_bnds = [] - yv = self.yoffset + self.xyedges[1] - for idx, val in enumerate(yv): - if idx + 1 
< len(yv): + y_bnds = [] + y_verts = self.verts[:, 1].reshape(self.nrow + 1, self.ncol + 1) + for i in range(self.nrow): + if i + 1 > self.nrow: + break + for j in range(self.ncol): + if j + 1 <= self.ncol: bnd = [] - bnd.append(yv[idx + 1]) - bnd.append(yv[idx]) + bnd.append(y_verts[i + 1][j]) + bnd.append(y_verts[i + 1][j + 1]) + bnd.append(y_verts[i][j + 1]) + bnd.append(y_verts[i][j]) y_bnds.append(bnd) - # create dataset coordinate vars - var_d = { - "time": (["time"], modeltime.totim), - "z": (["z"], z), - "y": (["y"], y), - "x": (["x"], x), - } - ds = ds.assign(var_d) - - # create bound vars - var_d = {"x_bnds": (["x", "bnd"], x_bnds), "y_bnds": (["y", "bnd"], y_bnds)} - ds = ds.assign(var_d) - - ds["time"].attrs["calendar"] = "standard" - ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" - ds["time"].attrs["axis"] = "T" - ds["time"].attrs["standard_name"] = "time" - ds["time"].attrs["long_name"] = "time" - ds["z"].attrs["units"] = "layer" - ds["z"].attrs["long_name"] = "layer number" - ds["y"].attrs["units"] = lenunits[self.lenuni] - ds["y"].attrs["axis"] = "Y" - ds["y"].attrs["standard_name"] = "projection_y_coordinate" - ds["y"].attrs["long_name"] = "Northing" - ds["y"].attrs["bounds"] = "y_bnds" - ds["x"].attrs["units"] = lenunits[self.lenuni] - ds["x"].attrs["axis"] = "X" - ds["x"].attrs["standard_name"] = "projection_x_coordinate" - ds["x"].attrs["long_name"] = "Easting" - ds["x"].attrs["bounds"] = "x_bnds" - - return ds + var_d = { + "mesh_face_x": (["nmesh_face"], self.xcellcenters.flatten()), + "mesh_face_xbnds": (["nmesh_face", "max_nmesh_face_nodes"], x_bnds), + "mesh_face_y": (["nmesh_face"], self.ycellcenters.flatten()), + "mesh_face_ybnds": (["nmesh_face", "max_nmesh_face_nodes"], y_bnds), + } + ds = ds.assign(var_d) + ds["mesh_face_x"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_x"].attrs["standard_name"] = "projection_x_coordinate" + ds["mesh_face_x"].attrs["long_name"] = "Easting" + ds["mesh_face_x"].attrs["bounds"] = "mesh_face_xbnds" + ds["mesh_face_y"].attrs["units"] = lenunits[self.lenuni] + ds["mesh_face_y"].attrs["standard_name"] = "projection_y_coordinate" + ds["mesh_face_y"].attrs["long_name"] = "Northing" + ds["mesh_face_y"].attrs["bounds"] = "mesh_face_ybnds" + + # mesh face nodes + max_face_nodes = 4 + face_nodes = [] + for r in self.iverts: + nodes = [np.int32(x + 1) for x in r] + nodes.reverse() + face_nodes.append(nodes) + + var_d = { + "mesh_face_nodes": (["nmesh_face", "max_nmesh_face_nodes"], face_nodes), + } + ds = ds.assign(var_d) + ds["mesh_face_nodes"].attrs["cf_role"] = "face_node_connectivity" + ds["mesh_face_nodes"].attrs["long_name"] = ( + "Vertices bounding cell (counterclockwise)" + ) + ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 + ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + + def _structured_dataset(self, ds, modeltime=None): + lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + + x = self.xoffset + self.xycenters[0] + y = self.yoffset + self.xycenters[1] + z = [float(x) for x in range(1, self.nlay + 1)] + + # set coordinate var bounds + x_bnds = [] + xv = self.xoffset + self.xyedges[0] + for idx, val in enumerate(xv): + if idx + 1 < len(xv): + bnd = [] + bnd.append(xv[idx]) + bnd.append(xv[idx + 1]) + x_bnds.append(bnd) + + y_bnds = [] + yv = self.yoffset + self.xyedges[1] + for idx, val in enumerate(yv): + if idx + 1 < len(yv): + bnd = [] + bnd.append(yv[idx + 1]) + bnd.append(yv[idx]) + y_bnds.append(bnd) + + # create dataset coordinate vars + var_d = { + "time": (["time"], 
modeltime.totim), + "z": (["z"], z), + "y": (["y"], y), + "x": (["x"], x), + } + ds = ds.assign(var_d) + + # create bound vars + var_d = {"x_bnds": (["x", "bnd"], x_bnds), "y_bnds": (["y", "bnd"], y_bnds)} + ds = ds.assign(var_d) + + ds["time"].attrs["calendar"] = "standard" + ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["axis"] = "T" + ds["time"].attrs["standard_name"] = "time" + ds["time"].attrs["long_name"] = "time" + ds["z"].attrs["units"] = "layer" + ds["z"].attrs["long_name"] = "layer number" + ds["y"].attrs["units"] = lenunits[self.lenuni] + ds["y"].attrs["axis"] = "Y" + ds["y"].attrs["standard_name"] = "projection_y_coordinate" + ds["y"].attrs["long_name"] = "Northing" + ds["y"].attrs["bounds"] = "y_bnds" + ds["x"].attrs["units"] = lenunits[self.lenuni] + ds["x"].attrs["axis"] = "X" + ds["x"].attrs["standard_name"] = "projection_x_coordinate" + ds["x"].attrs["long_name"] = "Easting" + ds["x"].attrs["bounds"] = "x_bnds" + + return ds def _set_structured_iverts(self): """ diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 4242b1c5f1..2e43e36d81 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -607,7 +607,14 @@ def dataset(self, modeltime=None, mesh=None): FILLNA_DBL = 9.96920996838687e36 lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + if mesh is None or mesh.upper() != "LAYERED": + raise ValueError("Vertex grid only supports layered mesh datasets") + + if modeltime is None: + raise ValueError("modeltime required for dataset timeseries") + ds = xr.Dataset() + ds.attrs["modflow_grid"] = "VERTEX" # mesh container variable ds = ds.assign({"mesh": ([], np.int32(1))}) diff --git a/flopy/mf6/tmp/2/mfpackage.py b/flopy/mf6/tmp/2/mfpackage.py new file mode 100644 index 0000000000..d20612f27a --- /dev/null +++ b/flopy/mf6/tmp/2/mfpackage.py @@ -0,0 +1,3801 @@ +import copy +import datetime +import errno +import inspect +import os +import sys +import warnings + +import numpy as np + +from ..mbase import ModelInterface +from ..pakbase import PackageInterface +from ..utils import datautil +from ..utils.check import mf6check +from ..version import __version__ +from .coordinates import modeldimensions +from .data import ( + mfdata, + mfdataarray, + mfdatalist, + mfdataplist, + mfdatascalar, + mfstructure, +) +from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal +from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure +from .mfbase import ( + ExtFileAction, + FlopyException, + MFDataException, + MFFileMgmt, + MFInvalidTransientBlockHeaderException, + PackageContainer, + PackageContainerType, + ReadAsArraysException, + VerbosityLevel, +) +from .utils.output_util import MF6Output + + +class MFBlockHeader: + """ + Represents the header of a block in a MF6 input file. This class is used + internally by FloPy and its direct use by a user of this library is not + recommended.
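For orientation, here is a minimal sketch of the input text this class models; the block name and any trailing header variables come from the BEGIN line. The block content and names are hypothetical, and the parsing below is plain string handling, not the FloPy API:

# "BEGIN period 2" yields a header named "period" with
# variable_strings == ["2"] (hypothetical example block)
block_text = "BEGIN period  2\n  1 1 1  100.0\nEND period\n"
begin_tokens = block_text.splitlines()[0].split()
name, variable_strings = begin_tokens[1], begin_tokens[2:]
assert name == "period" and variable_strings == ["2"]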
+ + Parameters + ---------- + name : str + Block name + variable_strings : list + List of strings that appear after the block name + comment : MFComment + Comment text in the block header + + Attributes + ---------- + name : str + Block name + variable_strings : list + List of strings that appear after the block name + comment : MFComment + Comment text in the block header + data_items : list + List of MFVariable of the variables contained in this block + + """ + + def __init__( + self, + name, + variable_strings, + comment, + simulation_data=None, + path=None, + block=None, + ): + self.name = name + self.variable_strings = variable_strings + self.block = block + if not ( + (simulation_data is None and path is None) + or (simulation_data is not None and path is not None) + ): + raise FlopyException( + "Block header must be initialized with both " + "simulation_data and path or with neither." + ) + if simulation_data is None: + self.comment = comment + self.simulation_data = None + self.path = path + self.comment_path = None + else: + self.connect_to_dict(simulation_data, path, comment) + # TODO: Get data_items from dictionary + self.data_items = [] + # build block comment paths + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + if isinstance(path, list): + path = tuple(path) + if path is not None: + self.blk_trailing_comment_path = path + ( + name, + "blk_trailing_comment", + ) + self.blk_post_comment_path = path + ( + name, + "blk_post_comment", + ) + if self.blk_trailing_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_trailing_comment_path] = ( + MFComment("", "", simulation_data, 0) + ) + if self.blk_post_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_post_comment_path] = MFComment( + "\n", "", simulation_data, 0 + ) + else: + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + + def __lt__(self, other): + transient_key = self.get_transient_key() + if transient_key is None: + return True + else: + other_key = other.get_transient_key() + if other_key is None: + return False + else: + return transient_key < other_key + + def build_header_variables( + self, + simulation_data, + block_header_structure, + block_path, + data, + dimensions, + ): + """Builds data objects to hold header variables.""" + self.data_items = [] + var_path = block_path + (block_header_structure[0].name,) + + # fix up data + fixed_data = [] + if ( + block_header_structure[0].data_item_structures[0].type + == DatumType.keyword + ): + data_item = block_header_structure[0].data_item_structures[0] + fixed_data.append(data_item.name) + if isinstance(data, tuple): + data = list(data) + if isinstance(data, list): + fixed_data = fixed_data + data + else: + fixed_data.append(data) + if len(fixed_data) > 0: + fixed_data = [tuple(fixed_data)] + # create data object + new_data = self.block.data_factory( + simulation_data, + None, + block_header_structure[0], + True, + var_path, + dimensions, + fixed_data, + ) + + self.add_data_item(new_data, data) + + def add_data_item(self, new_data, data): + """Adds data to the block.""" + self.data_items.append(new_data) + while isinstance(data, list): + if len(data) > 0: + data = data[0] + else: + data = None + if not isinstance(data, tuple): + data = (data,) + self.blk_trailing_comment_path += data + self.blk_post_comment_path += data + + def is_same_header(self, block_header): + """Checks if `block_header` 
is the same header as this header.""" + if len(self.variable_strings) > 0: + if len(self.variable_strings) != len( + block_header.variable_strings + ): + return False + else: + for sitem, oitem in zip( + self.variable_strings, block_header.variable_strings + ): + if sitem != oitem: + return False + return True + elif ( + len(self.data_items) > 0 and len(block_header.variable_strings) > 0 + ): + typ_obj = ( + self.data_items[0].structure.data_item_structures[0].type_obj + ) + if typ_obj == int or typ_obj == float: + return bool( + self.variable_strings[0] + == block_header.variable_strings[0] + ) + else: + return True + elif len(self.data_items) == len(block_header.variable_strings): + return True + return False + + def get_comment(self): + """Get block header comment""" + if self.simulation_data is None: + return self.comment + else: + return self.simulation_data.mfdata[self.comment_path] + + def connect_to_dict(self, simulation_data, path, comment=None): + """Add comment to the simulation dictionary""" + self.simulation_data = simulation_data + self.path = path + self.comment_path = path + ("blk_hdr_comment",) + if comment is None: + simulation_data.mfdata[self.comment_path] = self.comment + else: + simulation_data.mfdata[self.comment_path] = comment + self.comment = None + + def write_header(self, fd): + """Writes block header to file object `fd`. + + Parameters + ---------- + fd : file object + File object to write block header to. + + """ + fd.write(f"BEGIN {self.name}") + if len(self.data_items) > 0: + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + one_based = ( + self.data_items[0].structure.type == DatumType.integer + ) + entry = self.data_items[0].get_file_entry( + values_only=True, one_based=one_based + ) + else: + entry = self.data_items[0].get_file_entry() + fd.write(str(entry.rstrip())) + if len(self.data_items) > 1: + for data_item in self.data_items[1:]: + entry = data_item.get_file_entry(values_only=True) + fd.write(str(entry).rstrip()) + if self.get_comment().text: + fd.write(" ") + self.get_comment().write(fd) + fd.write("\n") + + def write_footer(self, fd): + """Writes block footer to file object `fd`. + + Parameters + ---------- + fd : file object + File object to write block footer to. + + """ + fd.write(f"END {self.name}") + if len(self.data_items) > 0: + one_based = self.data_items[0].structure.type == DatumType.integer + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + entry = self.data_items[0].get_file_entry( + values_only=True, one_based=one_based + ) + else: + entry = self.data_items[0].get_file_entry() + fd.write(str(entry.rstrip())) + fd.write("\n") + + def get_transient_key(self, data_path=None): + """Get transient key associated with this block header.""" + transient_key = None + for index in range(0, len(self.data_items)): + if self.data_items[index].structure.type != DatumType.keyword: + if data_path == self.data_items[index].path: + # avoid infinite recursion + return True + transient_key = self.data_items[index].get_data() + if isinstance(transient_key, np.recarray): + item_struct = self.data_items[index].structure + key_index = item_struct.first_non_keyword_index() + if not ( + key_index is not None + and len(transient_key[0]) > key_index + ): + if key_index is None: + raise FlopyException( + "Block header index could " + "not be determined." 
+ ) + else: + raise FlopyException( + 'Block header index "{}" ' + 'must be less than "{}"' + ".".format(key_index, len(transient_key[0])) + ) + transient_key = transient_key[0][key_index] + break + return transient_key + + +class MFBlock: + """ + Represents a block in a MF6 input file. This class is used internally + by FloPy and use by users of the FloPy library is not recommended. + + Parameters + ---------- + simulation_data : MFSimulationData + Data specific to this simulation + dimensions : MFDimensions + Describes model dimensions including model grid and simulation time + structure : MFVariableStructure + Structure describing block + path : tuple + Unique path to block + + Attributes + ---------- + block_headers : MFBlockHeader + Block header text (BEGIN/END), header variables, comments in the + header + structure : MFBlockStructure + Structure describing block + path : tuple + Unique path to block + datasets : OrderDict + Dictionary of dataset objects with keys that are the name of the + dataset + datasets_keyword : dict + Dictionary of dataset objects with keys that are key words to identify + start of dataset + enabled : bool + If block is being used in the simulation + + """ + + def __init__( + self, + simulation_data, + dimensions, + structure, + path, + model_or_sim, + container_package, + ): + self._simulation_data = simulation_data + self._dimensions = dimensions + self._model_or_sim = model_or_sim + self._container_package = container_package + self.block_headers = [ + MFBlockHeader( + structure.name, + [], + MFComment("", path, simulation_data, 0), + simulation_data, + path, + self, + ) + ] + self.structure = structure + self.path = path + self.datasets = {} + self.datasets_keyword = {} + # initially disable if optional + self.enabled = structure.number_non_optional_data() > 0 + self.loaded = False + self.external_file_name = None + self._structure_init() + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + data_str = "" + for dataset in self.datasets.values(): + if formal: + ds_repr = repr(dataset) + if len(ds_repr.strip()) > 0: + data_str = ( + f"{data_str}{dataset.structure.name}\n{dataset!r}\n" + ) + else: + ds_str = str(dataset) + if len(ds_str.strip()) > 0: + data_str = ( + f"{data_str}{dataset.structure.name}\n{dataset!s}\n" + ) + return data_str + + # return an MFScalar, MFList, or MFArray + def data_factory( + self, + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + data=None, + package=None, + ): + """Creates the appropriate data child object derived from MFData.""" + data_type = structure.get_datatype() + # examine the data structure and determine the data type + if ( + data_type == mfstructure.DataType.scalar_keyword + or data_type == mfstructure.DataType.scalar + ): + return mfdatascalar.MFScalar( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + ) + elif ( + data_type == mfstructure.DataType.scalar_keyword_transient + or data_type == mfstructure.DataType.scalar_transient + ): + trans_scalar = mfdatascalar.MFScalarTransient( + sim_data, model_or_sim, structure, enable, path, dimensions + ) + if data is not None: + trans_scalar.set_data(data, key=0) + return trans_scalar + elif data_type == mfstructure.DataType.array: + return mfdataarray.MFArray( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + self, + ) + elif data_type == mfstructure.DataType.array_transient: + 
trans_array = mfdataarray.MFTransientArray( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + self, + ) + if data is not None: + trans_array.set_data(data, key=0) + return trans_array + elif data_type == mfstructure.DataType.list: + if ( + structure.basic_item + and self._container_package.package_type.lower() != "nam" + and self._simulation_data.use_pandas + ): + return mfdataplist.MFPandasList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + else: + return mfdatalist.MFList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + elif data_type == mfstructure.DataType.list_transient: + if structure.basic_item and self._simulation_data.use_pandas: + trans_list = mfdataplist.MFPandasTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + else: + trans_list = mfdatalist.MFTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + trans_list.set_data(data, key=0, autofill=True) + return trans_list + elif data_type == mfstructure.DataType.list_multiple: + mult_list = mfdatalist.MFMultipleList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + mult_list.set_data(data, key=0, autofill=True) + return mult_list + + def _structure_init(self): + # load datasets keywords into dictionary + for dataset_struct in self.structure.data_structures.values(): + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + # load block header data items into dictionary + for dataset in self.structure.block_header_structure: + self._new_dataset(dataset.name, dataset, True, None) + + def set_model_relative_path(self, model_ws): + """Sets `model_ws` as the model path relative to the simulation's + path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. 
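A minimal sketch of the path rewrite this method performs on file-type data, mirroring the loop in the body below: only the base file name is kept, then re-rooted under model_ws. Paths are hypothetical:

import os

model_ws = "gwf_model"  # hypothetical model directory
file_line = ["old_dir/flow.uzf.bud"]  # hypothetical file record entry
file_line[0] = os.path.join(model_ws, os.path.split(file_line[0])[1])
assert file_line[0] == os.path.join("gwf_model", "flow.uzf.bud")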
+ """ + # update datasets + for key, dataset in self.datasets.items(): + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_name = os.path.split(file_line[0])[1] + file_line[0] = os.path.join(model_ws, old_file_name) + # update block headers + for block_header in self.block_headers: + for dataset in block_header.data_items: + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_path, old_file_name = os.path.split( + file_line[1] + ) + new_file_path = os.path.join( + model_ws, old_file_name + ) + # update transient keys of datasets within the + # block + for key, idataset in self.datasets.items(): + if isinstance(idataset, mfdata.MFTransient): + idataset.update_transient_key( + file_line[1], new_file_path + ) + file_line[1] = os.path.join( + model_ws, old_file_name + ) + + def add_dataset(self, dataset_struct, data, var_path): + """Add data to this block.""" + try: + self.datasets[var_path[-1]] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + var_path, + self._dimensions, + data, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + + self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] + dtype = dataset_struct.get_datatype() + if ( + dtype == mfstructure.DataType.list_transient + or dtype == mfstructure.DataType.list_multiple + or dtype == mfstructure.DataType.array_transient + ): + # build repeating block header(s) + if isinstance(data, dict): + # Add block headers for each dictionary key + for index in data: + if isinstance(index, tuple): + header_list = list(index) + else: + header_list = [index] + self._build_repeating_header(header_list) + elif isinstance(data, list): + # Add a single block header of value 0 + self._build_repeating_header([0]) + elif ( + dtype != mfstructure.DataType.list_multiple + and data is not None + ): + self._build_repeating_header([[0]]) + + return self.datasets[var_path[-1]] + + def _build_repeating_header(self, header_data): + if self.header_exists(header_data[0]): + return + if ( + len(self.block_headers[-1].data_items) == 1 + and self.block_headers[-1].data_items[0].get_data() is not None + ): + block_header_path = self.path + (len(self.block_headers) + 1,) + block_header = MFBlockHeader( + self.structure.name, + [], + MFComment("", self.path, self._simulation_data, 0), + self._simulation_data, + block_header_path, + self, + ) + self.block_headers.append(block_header) + else: + block_header_path = self.path + 
(len(self.block_headers),) + + struct = self.structure + last_header = self.block_headers[-1] + try: + last_header.build_header_variables( + self._simulation_data, + struct.block_header_structure, + block_header_path, + header_data, + self._dimensions, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while building" + " block header variables for block " + '"{}"'.format(last_header.name), + ) + + def _new_dataset( + self, key, dataset_struct, block_header=False, initial_val=None + ): + dataset_path = self.path + (key,) + if block_header: + if ( + dataset_struct.type == DatumType.integer + and initial_val is not None + and len(initial_val) >= 1 + and dataset_struct.get_record_size()[0] == 1 + ): + # stress periods are stored 0 based + initial_val = int(initial_val[0]) - 1 + if isinstance(initial_val, list): + initial_val_path = tuple(initial_val) + initial_val = [tuple(initial_val)] + else: + initial_val_path = initial_val + try: + new_data = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + self.block_headers[-1].add_data_item(new_data, initial_val_path) + + else: + try: + self.datasets[key] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + + def is_empty(self): + """Returns true if this block is empty.""" + for key, dataset in self.datasets.items(): + try: + has_data = dataset.has_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while verifying" + ' data of dataset "{}" in block ' + '"{}"'.format(dataset.structure.name, self.structure.name), + ) + + if has_data is not None and has_data: + return False + return True + + def load(self, block_header, fd, strict=True): + """Loads block from file object. The file object must be advanced to + the beginning of the block before calling. + + Parameters + ---------- + block_header : MFBlockHeader + Block header for the block being loaded. + fd : file + File descriptor of file being loaded + strict : bool + Enforce strict MODFLOW 6 file format.
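A hedged sketch of the calling convention, on one reading of the docstring (the BEGIN line has already been consumed, so the file object sits at the first line inside the block); the file content and block name are hypothetical:

import io

fd = io.StringIO("BEGIN options\n  SAVE_FLOWS\nEND options\n")
header_line = fd.readline()  # the caller consumes the BEGIN line first
assert header_line.split()[0] == "BEGIN"
# block.load(block_header, fd) would then parse lines up to END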
+ """ + # verify number of header variables + if ( + len(block_header.variable_strings) + < self.structure.number_non_optional_block_header_data() + ): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" does not ' + "contain the correct number of " + "variables {}".format(block_header.name, self.path) + ) + print(warning_str) + return + + if self.loaded: + # verify header has not already been loaded + for bh_current in self.block_headers: + if bh_current.is_same_header(block_header): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" is ' + "not a unique block header " + "{}".format(block_header.name, self.path) + ) + print(warning_str) + return + + # init + self.enabled = True + if not self.loaded: + self.block_headers = [] + block_header.block = self + self.block_headers.append(block_header) + + # process any header variable + if len(self.structure.block_header_structure) > 0: + dataset = self.structure.block_header_structure[0] + self._new_dataset( + dataset.name, + dataset, + True, + self.block_headers[-1].variable_strings, + ) + + # handle special readasarrays case + if ( + self._container_package.structure.read_as_arrays + or ( + hasattr(self._container_package, "aux") + and self._container_package.aux.structure.layered + ) + ): + # auxiliary variables may appear with aux variable name as keyword + aux_vars = self._container_package.auxiliary.get_data() + if aux_vars is not None: + for var_name in list(aux_vars[0])[1:]: + self.datasets_keyword[(var_name,)] = ( + self._container_package.aux.structure + ) + + comments = [] + + # capture any initial comments + initial_comment = MFComment("", "", 0) + fd_block = fd + line = fd_block.readline() + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(line) + post_data_comments = MFComment("", "", self._simulation_data, 0) + while MFComment.is_comment(line, True): + initial_comment.add_text(line) + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + + # if block not empty + external_file_info = None + if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"): + if arr_line[0].lower() == "open/close": + # open block contents from external file + fd_block.readline() + root_path = self._simulation_data.mfpath.get_sim_path() + try: + file_name = os.path.split(arr_line[1])[-1] + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f' opening external file "{file_name}"...' + ) + external_file_info = arr_line + except: + type_, value_, traceback_ = sys.exc_info() + message = f'Error reading external file specified in line "{line}"' + raise MFDataException( + self._container_package.model_name, + self._container_package._get_pname(), + self.path, + "reading external file", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + if len(self.structure.data_structures) <= 1: + # load a single data set + dataset = self.datasets[next(iter(self.datasets))] + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading data {dataset.structure.name}..." 
+ ) + next_line = dataset.load( + line, + fd_block, + self.block_headers[-1], + initial_comment, + external_file_info, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format( + dataset.structure.name, + self.structure.name, + fd_block.name, + ), + ) + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[0]}..." + ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + + if next_line[1] is not None: + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + else: + arr_line = "" + # capture any trailing comments + dataset.post_data_comments = post_data_comments + while arr_line and ( + len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" + ): + next_line[1] = fd_block.readline().strip() + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + if arr_line and ( + len(next_line[1]) <= 2 + or arr_line[0][:3].upper() != "END" + ): + post_data_comments.add_text(" ".join(arr_line)) + else: + # look for keyword and store line as data or comment + try: + key, results = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + except MFInvalidTransientBlockHeaderException as e: + warning_str = f"WARNING: {e}" + print(warning_str) + self.block_headers.pop() + return + + self._save_comments(arr_line, line, key, comments) + if results[1] is None or results[1][:3].upper() != "END": + # block consists of unordered datasets + # load the data sets out of order based on + # initial constants + line = " " + while line != "": + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line: + # determine if at end of block + if ( + len(arr_line[0]) > 2 + and arr_line[0][:3].upper() == "END" + ): + break + # look for keyword and store line as data or comment + key, result = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + self._save_comments(arr_line, line, key, comments) + if ( + result[1] is not None + and result[1][:3].upper() == "END" + ): + break + else: + # block empty, store empty array in block variables + empty_arr = [] + for ds in self.datasets.values(): + if isinstance(ds, mfdata.MFTransient): + transient_key = block_header.get_transient_key() + ds.set_data(empty_arr, key=transient_key) + self.loaded = True + self.is_valid() + + def _find_data_by_keyword(self, line, fd, initial_comment): + first_key = None + nothing_found = False + next_line = [True, line] + while next_line[0] and not nothing_found: + arr_line = datautil.PyListUtil.split_data_line(next_line[1]) + key = datautil.find_keyword(arr_line, self.datasets_keyword) + if key is not None: + ds_name =
self.datasets_keyword[key].name + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading data {ds_name}...") + next_line = self.datasets[ds_name].load( + next_line[1], + fd, + self.block_headers[-1], + initial_comment, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format(ds_name, self.structure.name, fd.name), + ) + + # see if first item's name indicates a reference to + # another package + package_info_list = self._get_package_info( + self.datasets[ds_name] + ) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[1]}..." + ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + if first_key is None: + first_key = key + nothing_found = False + elif ( + arr_line[0].lower() == "readasarrays" + and self.path[-1].lower() == "options" + and self._container_package.structure.read_as_arrays is False + ): + error_msg = ( + "ERROR: Attempting to read a ReadAsArrays " + "package as a non-ReadAsArrays " + "package {}".format(self.path) + ) + raise ReadAsArraysException(error_msg) + else: + nothing_found = True + + if first_key is None: + # look for recarrays. if there is a lone recarray in this block, + # use it by default + recarrays = self.structure.get_all_recarrays() + if len(recarrays) != 1: + return key, [None, None] + dataset = self.datasets[recarrays[0].name] + ds_result = dataset.load( + line, fd, self.block_headers[-1], initial_comment + ) + + # see if first item's name indicates a reference to another + # package + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[0]}..." 
+ ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + None, + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package(pkg, pkg.filename, False) + + return recarrays[0].keyword, ds_result + else: + return first_key, next_line + + def _get_package_info(self, dataset): + if not dataset.structure.file_data: + return None + for index in range(0, len(dataset.structure.data_item_structures)): + data_item = dataset.structure.data_item_structures[index] + if ( + data_item.type == DatumType.keyword + or data_item.type == DatumType.string + ): + item_name = data_item.name + package_type = item_name[:-1] + model_type = self._model_or_sim.structure.model_type + # not all packages have the same naming convention + # try different naming conventions to find the appropriate + # package + package_types = [ + package_type, + f"{self._container_package.package_type}" + f"{package_type}", + ] + package_type_found = None + for ptype in package_types: + if ( + PackageContainer.package_factory(ptype, model_type) + is not None + ): + package_type_found = ptype + break + if package_type_found is not None: + try: + data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'getting data from "{}" ' + 'in block "{}".'.format( + dataset.structure.name, self.structure.name + ), + ) + package_info_list = [] + if isinstance(data, np.recarray): + for row in data: + self._add_to_info_list( + package_info_list, + row[index], + package_type_found, + ) + else: + self._add_to_info_list( + package_info_list, data, package_type_found + ) + + return package_info_list + return None + + def _add_to_info_list( + self, package_info_list, file_location, package_type_found + ): + file_path, file_name = os.path.split(file_location) + dict_package_name = f"{package_type_found}_{self.path[-2]}" + package_info_list.append( + ( + package_type_found, + file_name, + file_path, + dict_package_name, + ) + ) + + def _save_comments(self, arr_line, line, key, comments): + # FIX: Save these comments somewhere in the data set + if key not in self.datasets_keyword: + if MFComment.is_comment(key, True): + if comments: + comments.append("\n") + comments.append(arr_line) + + def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes block to a file object. + + Parameters + ---------- + fd : file object + File object to write to. 
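A minimal sketch of the BEGIN/END framing that the write path (write_header, the dataset file entries, write_footer) produces, built here with plain strings; the block name and contents are hypothetical:

import io

fd = io.StringIO()
fd.write("BEGIN options\n")  # written by block_header.write_header
fd.write("  SAVE_FLOWS\n")   # dataset file entries
fd.write("END options\n")    # written by block_header.write_footer
assert fd.getvalue().splitlines()[-1] == "END options"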
+ + """ + # never write an empty block + is_empty = self.is_empty() + if ( + is_empty + and self.structure.name.lower() != "exchanges" + and self.structure.name.lower() != "options" + and self.structure.name.lower() != "sources" + and self.structure.name.lower() != "stressperioddata" + ): + return + if self.structure.repeating(): + repeating_datasets = self._find_repeating_datasets() + for repeating_dataset in repeating_datasets: + # resolve any missing block headers + self._add_missing_block_headers(repeating_dataset) + for block_header in sorted(self.block_headers): + # write block + self._write_block(fd, block_header, ext_file_action) + else: + self._write_block(fd, self.block_headers[0], ext_file_action) + + def _add_missing_block_headers(self, repeating_dataset): + key_data_list = repeating_dataset.get_active_key_list() + # assemble a dictionary of data keys and empty keys + key_dict = {} + for key in key_data_list: + key_dict[key[0]] = True + for key, value in repeating_dataset.empty_keys.items(): + if value: + key_dict[key] = True + for key in key_dict.keys(): + has_data = repeating_dataset.has_data(key) + empty_key = ( + key in repeating_dataset.empty_keys + and repeating_dataset.empty_keys[key] + ) + if not self.header_exists(key) and (has_data or empty_key): + self._build_repeating_header([key]) + + def header_exists(self, key, data_path=None): + if not isinstance(key, list): + if key is None: + return + comp_key_list = [key] + else: + comp_key_list = key + for block_header in self.block_headers: + transient_key = block_header.get_transient_key(data_path) + if transient_key is True: + return + for comp_key in comp_key_list: + if transient_key is not None and transient_key == comp_key: + return True + return False + + def set_all_data_external( + self, + base_name, + check_data=True, + external_data_folder=None, + binary=False, + ): + """Sets the block's list and array data to be stored externally, + base_name is external file name's prefix, check_data determines + if data error checking is enabled during this process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + base_name : str + Base file name of external files where data will be written to. + check_data : bool + Whether to do data error checking. 
+ external_data_folder + Folder where external data will be stored + binary: bool + Whether file will be stored as binary + + """ + + for key, dataset in self.datasets.items(): + lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( + dataset, mfdataplist.MFPandasList + ) + if ( + isinstance(dataset, mfdataarray.MFArray) + or (lst_data and dataset.structure.type == DatumType.recarray) + and dataset.enabled + ): + if not binary or ( + lst_data + and ( + dataset.data_dimensions.package_dim.boundnames() + or not dataset.structure.basic_item + ) + ): + ext = "txt" + binary = False + else: + ext = "bin" + file_path = f"{base_name}_{dataset.structure.name}.{ext}" + replace_existing_external = False + if external_data_folder is not None: + # get simulation root path + root_path = self._simulation_data.mfpath.get_sim_path() + # get model relative path, if it exists + if isinstance(self._model_or_sim, ModelInterface): + name = self._model_or_sim.name + rel_path = ( + self._simulation_data.mfpath.model_relative_path[ + name + ] + ) + if rel_path is not None: + root_path = os.path.join(root_path, rel_path) + full_path = os.path.join(root_path, external_data_folder) + if not os.path.exists(full_path): + # create new external data folder + os.makedirs(full_path) + file_path = os.path.join(external_data_folder, file_path) + replace_existing_external = True + dataset.store_as_external_file( + file_path, + replace_existing_external=replace_existing_external, + check_data=check_data, + binary=binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the block's list and array data to be stored internally, + check_data determines if data error checking is enabled during this + process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + check_data : bool + Whether to do data error checking. 
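A runnable sketch of the external file naming used by set_all_data_external above: the base name, the dataset name, and a "txt"/"bin" extension selected by the binary flag. Values are hypothetical:

base_name, dataset_name, binary = "uzf01", "packagedata", False  # hypothetical
ext = "bin" if binary else "txt"
file_path = f"{base_name}_{dataset_name}.{ext}"
assert file_path == "uzf01_packagedata.txt"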
+ + """ + + for key, dataset in self.datasets.items(): + if ( + isinstance(dataset, mfdataarray.MFArray) + or ( + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) + and dataset.structure.type == DatumType.recarray + ) + and dataset.enabled + ): + dataset.store_internal(check_data=check_data) + + def _find_repeating_datasets(self): + repeating_datasets = [] + for key, dataset in self.datasets.items(): + if dataset.repeating: + repeating_datasets.append(dataset) + return repeating_datasets + + def _prepare_external(self, fd, file_name, binary=False): + fd_main = fd + fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) + # resolve full file and folder path + fd_file_path = os.path.join(fd_path, file_name) + fd_folder_path = os.path.split(fd_file_path)[0] + if fd_folder_path != "": + if not os.path.exists(fd_folder_path): + # create new external data folder + os.makedirs(fd_folder_path) + return fd_main, fd_file_path + + def _write_block(self, fd, block_header, ext_file_action): + transient_key = None + basic_list = False + dataset_one = list(self.datasets.values())[0] + if isinstance( + dataset_one, + (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), + ): + basic_list = True + for dataset in self.datasets.values(): + assert isinstance( + dataset, + ( + mfdataplist.MFPandasList, + mfdataplist.MFPandasTransientList, + ), + ) + # write block header + block_header.write_header(fd) + if len(block_header.data_items) > 0: + transient_key = block_header.get_transient_key() + + # gather data sets to write + data_set_output = [] + data_found = False + for key, dataset in self.datasets.items(): + try: + if transient_key is None: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" writing data {dataset.structure.name}..." + ) + if basic_list: + ext_fname = dataset.external_file_name() + if ext_fname is not None: + binary = dataset.binary_ext_data() + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry(fd, fd_main=fd_main) + fd = fd_main + else: + dataset.write_file_entry(fd) + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + else: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + " writing data {} ({}).." 
".".format( + dataset.structure.name, transient_key + ) + ) + if basic_list: + ext_fname = dataset.external_file_name(transient_key) + if ext_fname is not None: + binary = dataset.binary_ext_data(transient_key) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + fd = fd_main + else: + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + ) + else: + if dataset.repeating: + output = dataset.get_file_entry( + transient_key, ext_file_action=ext_file_action + ) + if output is not None: + data_set_output.append(output) + data_found = True + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message=( + "Error occurred while writing data " + f'"{dataset.structure.name}" in block ' + f'"{self.structure.name}" to file "{fd.name}"' + ), + ) + if not data_found: + return + if not basic_list: + # write block header + block_header.write_header(fd) + + if self.external_file_name is not None: + indent_string = self._simulation_data.indent_string + fd.write( + f"{indent_string}open/close " + f'"{self.external_file_name}"\n' + ) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, self.external_file_name + ) + # write data sets + for output in data_set_output: + fd.write(output) + + # write trailing comments + pth = block_header.blk_trailing_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + if self.external_file_name is not None and not basic_list: + # switch back writing to package file + fd.close() + fd = fd_main + + # write block footer + block_header.write_footer(fd) + + # write post block comments + pth = block_header.blk_post_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + # write extra line if comments are off + if not self._simulation_data.comments_on: + fd.write("\n") + + def is_allowed(self): + """Determine if block is valid based on the values of dependent + MODFLOW variables.""" + if self.structure.variable_dependant_path: + # fill in empty part of the path with the current path + if len(self.structure.variable_dependant_path) == 3: + dependant_var_path = ( + self.path[0], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 2: + dependant_var_path = ( + self.path[0], + self.path[1], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 1: + dependant_var_path = ( + self.path[0], + self.path[1], + self.path[2], + ) + self.structure.variable_dependant_path + else: + dependant_var_path = None + + # get dependency + dependant_var = None + mf_data = self._simulation_data.mfdata + if dependant_var_path in mf_data: + dependant_var = mf_data[dependant_var_path] + + # resolve dependency + if self.structure.variable_value_when_active[0] == "Exists": + exists = self.structure.variable_value_when_active[1] + if dependant_var and exists.lower() == "true": + return True + elif not dependant_var and exists.lower() == "false": + return True + else: + return False + elif not dependant_var: + return False + elif 
self.structure.variable_value_when_active[0] == ">": + min_val = self.structure.variable_value_when_active[1] + if dependant_var > float(min_val): + return True + else: + return False + elif self.structure.variable_value_when_active[0] == "<": + max_val = self.structure.variable_value_when_active[1] + if dependant_var < float(max_val): + return True + else: + return False + return True + + def is_valid(self): + """ + Returns true if the block is valid. + """ + # check data sets + for dataset in self.datasets.values(): + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid: + return False + # check variables + for block_header in self.block_headers: + for dataset in block_header.data_items: + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid(): + return False + + +class MFPackage(PackageInterface): + """ + Provides an interface for the user to specify data to build a package. + + Parameters + ---------- + parent : MFModel, MFSimulation, or MFPackage + The parent model, simulation, or package containing this package + package_type : str + String defining the package type + filename : str or PathLike + Name or path of file where this package is stored + quoted_filename : str + Filename with quotes around it when there is a space in the name + pname : str + Package name + loading_package : bool + Whether or not to add this package to the parent container's package + list during initialization + + Attributes + ---------- + blocks : dict + Dictionary of blocks contained in this package by block name + path : tuple + Data dictionary path to this package + structure : PackageStructure + Describes the blocks and data contain in this package + dimensions : PackageDimension + Resolves data dimensions for data within this package + + """ + + def __init__( + self, + parent, + package_type, + filename=None, + pname=None, + loading_package=False, + **kwargs, + ): + parent_file = kwargs.pop("parent_file", None) + if isinstance(parent, MFPackage): + self.model_or_sim = parent.model_or_sim + self.parent_file = parent + elif parent_file is not None: + self.model_or_sim = parent + self.parent_file = parent_file + else: + self.model_or_sim = parent + self.parent_file = None + _internal_package = kwargs.pop("_internal_package", False) + if _internal_package: + self.internal_package = True + else: + self.internal_package = False + self._data_list = [] + self._package_type = package_type + if self.model_or_sim.type == "Model" and package_type.lower() != "nam": + self.model_name = self.model_or_sim.name + else: + self.model_name = None + + # a package must have a dfn_file_name + if not hasattr(self, "dfn_file_name"): + self.dfn_file_name = "" + + if ( + self.model_or_sim.type != "Model" + and self.model_or_sim.type != "Simulation" + ): + message = ( + "Invalid model_or_sim parameter. Expecting either a " + 'model or a simulation. 
Instead type "{}" was ' + "given.".format(type(self.model_or_sim)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self._package_container = PackageContainer( + self.model_or_sim.simulation_data + ) + self.simulation_data = self.model_or_sim.simulation_data + + self.blocks = {} + self.container_type = [] + self.loading_package = loading_package + if pname is not None: + if not isinstance(pname, str): + message = ( + "Invalid pname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(pname)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self.package_name = pname.lower() + else: + self.package_name = None + + if filename is None: + if self.model_or_sim.type == "Simulation": + # filename uses simulation base name + base_name = os.path.basename( + os.path.normpath(self.model_or_sim.name) + ) + self._filename = f"{base_name}.{package_type}" + else: + # filename uses model base name + self._filename = f"{self.model_or_sim.name}.{package_type}" + else: + if not isinstance(filename, (str, os.PathLike)): + message = ( + "Invalid fname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(filename)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + self._filename = datautil.clean_filename( + str(filename).replace("\\", "/") + ) + self.path, self.structure = self.model_or_sim.register_package( + self, not loading_package, pname is None, filename is None + ) + self.dimensions = self.create_package_dimensions() + + if self.path is None: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package type {} failed to register property." + " {}".format(self._package_type, self.path) + ) + if self.parent_file is not None: + self.container_type.append(PackageContainerType.package) + # init variables that may be used later + self.post_block_comments = None + self.last_error = None + self.bc_color = "black" + self.__inattr = False + self._child_package_groups = {} + child_builder_call = kwargs.pop("child_builder_call", None) + if ( + self.parent_file is not None + and child_builder_call is None + and package_type in self.parent_file._child_package_groups + ): + # initialize as part of the parent's child package group + chld_pkg_grp = self.parent_file._child_package_groups[package_type] + chld_pkg_grp.init_package(self, self._filename, False) + + # remove any remaining valid kwargs + key_list = list(kwargs.keys()) + for key in key_list: + if "filerecord" in key and hasattr(self, f"{key}"): + kwargs.pop(f"{key}") + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = ( + f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
+ ) + raise FlopyException(excpt_str) + + def __init_subclass__(cls): + """Register package type""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.model_name, + package=self._get_pname(), + ) + return + + if all( + hasattr(self, attr) for attr in ["model_or_sim", "_package_type"] + ): + if hasattr(self.model_or_sim, "_mg_resync"): + if not self.model_or_sim._mg_resync: + self.model_or_sim._mg_resync = self._mg_resync + + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + @property + def filename(self): + """Package's file name.""" + return self._filename + + @property + def quoted_filename(self): + """Package's file name with quotes if there is a space.""" + if " " in self._filename: + return f'"{self._filename}"' + return self._filename + + @filename.setter + def filename(self, fname): + """Package's file name.""" + if ( + isinstance(self.parent_file, MFPackage) + and self.package_type in self.parent_file._child_package_groups + ): + fname = datautil.clean_filename(fname) + try: + child_pkg_group = self.parent_file._child_package_groups[ + self.structure.file_type + ] + child_pkg_group._update_filename(self._filename, fname) + except Exception: + print( + "WARNING: Unable to update file name for parent " + f"package of {self.package_name}." + ) + if self.model_or_sim is not None and fname is not None: + if self._package_type != "nam": + self.model_or_sim.update_package_filename(self, fname) + self._filename = fname + + @property + def package_type(self): + """String describing type of package""" + return self._package_type + + @property + def name(self): + """Name of package""" + return [self.package_name] + + @name.setter + def name(self, name): + """Name of package""" + self.package_name = name + + @property + def parent(self): + """Parent package""" + return self.model_or_sim + + @parent.setter + def parent(self, parent): + """Parent package""" + assert False, "Do not use this setter to set the parent" + + @property + def plottable(self): + """If package is plottable""" + if self.model_or_sim.type == "Simulation": + return False + else: + return True + + @property + def output(self): + """ + Method to get output associated with a specific package + + Returns + ------- + MF6Output object + """ + return MF6Output(self) + + @property + def data_list(self): + """List of data in this package.""" + # return [data_object, data_object, ...] + return self._data_list + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated.
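A runnable sketch of the quoted_filename behavior defined above: a file name containing a space is wrapped in double quotes when written to MF6 input. The name is hypothetical:

fname = "my package.riv"  # hypothetical file name containing a space
quoted = f'"{fname}"' if " " in fname else fname
assert quoted == '"my package.riv"'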
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. + type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def add_package(self, package): + pkg_type = package.package_type.lower() + if pkg_type in self._package_container.package_type_dict: + for existing_pkg in self._package_container.package_type_dict[ + pkg_type + ]: + if existing_pkg is package: + # do not add the same package twice + return + self._package_container.add_package(package) + + def _get_aux_data(self, aux_names): + if hasattr(self, "stress_period_data"): + spd = self.stress_period_data.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "packagedata"): + pd = self.packagedata.get_data() + if aux_names[0][1] in pd.dtype.names: + return pd + if hasattr(self, "perioddata"): + spd = self.perioddata.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "aux"): + return self.aux.get_data() + return None + + def _boundnames_active(self): + if hasattr(self, "boundnames"): + if self.boundnames.get_data(): + return True + return False + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Data check, returns True on success. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. 
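A runnable sketch of the auxiliary-data validation idea implemented in the body below: each auxiliary value must be numeric or the name of a defined time series variable. Names and values are hypothetical:

time_series_names = {"ts_rate"}  # hypothetical time series variable names

def _is_float(value):
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

aux_values = ["1.5", "ts_rate", "bad-value"]  # hypothetical aux column values
errors = [v for v in aux_values if not _is_float(v) and v not in time_series_names]
assert errors == ["bad-value"]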
+ """ + + if checktype is None: + checktype = mf6check + # do general checks + chk = super().check(f, verbose, level, checktype) + + # do mf6 specific checks + if hasattr(self, "auxiliary"): + # auxiliary variable check + # check if auxiliary variables are defined + aux_names = self.auxiliary.get_data() + if aux_names is not None and len(aux_names[0]) > 1: + num_aux_names = len(aux_names[0]) - 1 + # check for stress period data + aux_data = self._get_aux_data(aux_names) + if aux_data is not None and len(aux_data) > 0: + # make sure the check object exists + if chk is None: + chk = self._get_check(f, verbose, level, checktype) + if isinstance(aux_data, dict): + aux_datasets = list(aux_data.values()) + else: + aux_datasets = [aux_data] + dataset_type = "unknown" + for dataset in aux_datasets: + if isinstance(dataset, np.recarray): + dataset_type = "recarray" + break + elif isinstance(dataset, np.ndarray): + dataset_type = "ndarray" + break + # if aux data is in a list + if dataset_type == "recarray": + # check for time series data + time_series_name_dict = {} + if hasattr(self, "ts") and hasattr( + self.ts, "time_series_namerecord" + ): + # build dictionary of time series data variables + ts_nr = self.ts.time_series_namerecord.get_data() + if ts_nr is not None: + for item in ts_nr: + if len(item) > 0 and item[0] is not None: + time_series_name_dict[item[0]] = True + # auxiliary variables are last unless boundnames + # defined, then second to last + if self._boundnames_active(): + offset = 1 + else: + offset = 0 + + # loop through stress period datasets with aux data + for data in aux_datasets: + if isinstance(data, np.recarray): + for row in data: + row_size = len(row) + aux_start_loc = ( + row_size - num_aux_names - offset - 1 + ) + # loop through auxiliary variables + for idx, var in enumerate( + list(aux_names[0])[1:] + ): + # get index of current aux variable + data_index = aux_start_loc + idx + # verify auxiliary value is either + # numeric or time series variable + if ( + not datautil.DatumUtil.is_float( + row[data_index] + ) + and row[data_index] + not in time_series_name_dict + ): + desc = ( + f"Invalid non-numeric " + f"value " + f"'{row[data_index]}' " + f"in auxiliary data." + ) + chk._add_to_summary( + "Error", + desc=desc, + package=self.package_name, + ) + # else if stress period data is arrays + elif dataset_type == "ndarray": + # loop through auxiliary stress period datasets + for data in aux_datasets: + # verify auxiliary value is either numeric or time + # array series variable + if isinstance(data, np.ndarray): + val = np.isnan(np.sum(data)) + if val: + desc = ( + "One or more nan values were " + "found in auxiliary data." 
+ ) + chk._add_to_summary( + "Warning", + desc=desc, + package=self.package_name, + ) + return chk + + def _get_nan_exclusion_list(self): + excl_list = [] + if hasattr(self, "stress_period_data"): + spd_struct = self.stress_period_data.structure + for item_struct in spd_struct.data_item_structures: + if item_struct.optional or item_struct.keystring_dict: + excl_list.append(item_struct.name) + return excl_list + + def _get_data_str(self, formal, show_data=True): + data_str = ( + "package_name = {}\nfilename = {}\npackage_type = {}" + "\nmodel_or_simulation_package = {}" + "\n{}_name = {}" + "\n".format( + self._get_pname(), + self._filename, + self.package_type, + self.model_or_sim.type.lower(), + self.model_or_sim.type.lower(), + self.model_or_sim.name, + ) + ) + if self.parent_file is not None and formal: + data_str = ( + f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" + ) + else: + data_str = f"{data_str}\n" + if show_data: + for block in self.blocks.values(): + if formal: + bl_repr = repr(block) + if len(bl_repr.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, repr(block) + ) + ) + else: + bl_str = str(block) + if len(bl_str.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, str(block) + ) + ) + return data_str + + def _get_pname(self): + if self.package_name is not None: + return str(self.package_name) + else: + return str(self._filename) + + def _get_block_header_info(self, line, path): + # init + header_variable_strs = [] + arr_clean_line = line.strip().split() + header_comment = MFComment( + "", path + (arr_clean_line[1],), self.simulation_data, 0 + ) + # break header into components + if len(arr_clean_line) < 2: + message = ( + "Block header does not contain a name. 
Name " + 'expected in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "parsing block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + elif len(arr_clean_line) == 2: + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + else: + # process text after block name + comment = False + for entry in arr_clean_line[2:]: + # if start of comment + if MFComment.is_comment(entry.strip()[0]): + comment = True + if comment: + header_comment.text = " ".join( + [header_comment.text, entry] + ) + else: + header_variable_strs.append(entry) + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + + def _update_size_defs(self): + # build temporary data lookup by name + data_lookup = {} + for block in self.blocks.values(): + for dataset in block.datasets.values(): + data_lookup[dataset.structure.name] = dataset + + # loop through all data + for block in self.blocks.values(): + for dataset in block.datasets.values(): + # if data shape is 1-D + if ( + dataset.structure.shape + and len(dataset.structure.shape) == 1 + ): + # if shape name is data in this package + if dataset.structure.shape[0] in data_lookup: + size_def = data_lookup[dataset.structure.shape[0]] + size_def_name = size_def.structure.name + + if isinstance(dataset, mfdata.MFTransient): + # for transient data always use the maximum size + new_size = -1 + for key in dataset.get_active_key_list(): + try: + data = dataset.get_data(key=key[0]) + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + data_len = len(data) + if data_len > new_size: + new_size = data_len + else: + # for all other data set max to size + new_size = -1 + try: + data = dataset.get_data() + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + new_size = len(dataset.get_data()) + + if size_def.get_data() is None: + current_size = -1 + else: + current_size = size_def.get_data() + + if new_size > current_size: + # store current size + size_def.set_data(new_size) + + # informational message to the user + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "INFORMATION: {} in {} changed to {} " + "based on size of {}".format( + size_def_name, + size_def.structure.path[:-1], + new_size, + dataset.structure.name, + ) + ) + + def inspect_cells(self, cell_list, stress_period=None): + """ + Inspect model cells. Returns package data associated with cells. + + Parameters + ---------- + cell_list : list of tuples + List of model cells. Each model cell is a tuple of integers. + ex: [(1,1,1), (2,4,3)] + stress_period : int + For transient data, only return data from this stress period. If + not specified or None, all stress period data will be returned. 
+ + Returns + ------- + output : array + Array containing inspection results + + """ + data_found = [] + + # loop through blocks + local_index_names = [] + local_index_blocks = [] + local_index_values = [] + local_index_cellids = [] + # loop through blocks in package + for block in self.blocks.values(): + # loop through data in block + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + # handle list data + cellid_column = None + local_index_name = None + # loop through list data column definitions + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if index == 0 and data_item.type == DatumType.integer: + local_index_name = data_item.name + # look for cellid column in list data row + if isinstance(data_item, MFDataItemStructure) and ( + data_item.is_cellid or data_item.possible_cellid + ): + cellid_column = index + break + if cellid_column is not None: + data_output = DataSearchOutput(dataset.path) + local_index_vals = [] + local_index_cells = [] + # get data + if isinstance(dataset, mfdatalist.MFTransientList): + # data may be in multiple transient blocks, get + # data from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get data + main_data = {-1: dataset.get_data()} + + # loop through each dataset + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = value.dtype.names + # loop through list data rows + for line in value: + # loop through list of cells we are searching + # for + for cell in cell_list: + if isinstance( + line[cellid_column], tuple + ) and cellids_equal( + line[cellid_column], cell + ): + # save data found + data_output.data_entries.append(line) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append( + key + ) + if datautil.DatumUtil.is_int(line[0]): + # save index data for further + # processing. 
assuming index is + # always first entry + local_index_vals.append(line[0]) + local_index_cells.append(cell) + + if ( + local_index_name is not None + and len(local_index_vals) > 0 + ): + # capture index lookups for scanning related data + local_index_names.append(local_index_name) + local_index_blocks.append(block.path[-1]) + local_index_values.append(local_index_vals) + local_index_cellids.append(local_index_cells) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + elif isinstance(dataset, mfdataarray.MFArray): + # handle array data + data_shape = copy.deepcopy( + dataset.structure.data_item_structures[0].shape + ) + if dataset.path[-1] == "top": + # top is a special case where the two datasets + # need to be combined to get the correct layer top + model_grid = self.model_or_sim.modelgrid + main_data = {-1: model_grid.top_botm} + data_shape.append("nlay") + else: + if isinstance(dataset, mfdataarray.MFTransientArray): + # data may be in multiple blocks, get data from + # appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get a process data + main_data = {-1: dataset.get_data()} + if main_data is None: + continue + data_output = DataSearchOutput(dataset.path) + # loop through datasets + for key, array_data in main_data.items(): + if array_data is None: + continue + self.model_or_sim.match_array_cells( + cell_list, data_shape, array_data, key, data_output + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + + if len(local_index_names) > 0: + # look for data that shares the index value with data found + # for example a shared well or reach number + for block in self.blocks.values(): + # loop through data + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + data_item = dataset.structure.data_item_structures[0] + data_output = DataSearchOutput(dataset.path) + # loop through previous data found + for ( + local_index_name, + local_index_vals, + cell_ids, + local_block_name, + ) in zip( + local_index_names, + local_index_values, + local_index_cellids, + local_index_blocks, + ): + if local_block_name == block.path[-1]: + continue + if ( + isinstance(data_item, MFDataItemStructure) + and data_item.name == local_index_name + and data_item.type == DatumType.integer + ): + # matching data index type found, get data + if isinstance( + dataset, mfdatalist.MFTransientList + ): + # data may be in multiple blocks, get data + # from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block + main_data = {-1: dataset.get_data()} + # loop through the data + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = ( + value.dtype.names + ) + # loop through each row of data + for line in value: + # loop through the index values we are + # looking for + for index_val, cell_id in zip( + local_index_vals, cell_ids + ): + # try to match index values we are + # looking for to the data + if index_val == line[0]: + # save data found + data_output.data_entries.append( + line + ) + data_output.data_entry_ids.append( + index_val + ) + data_output.data_entry_cellids.append( + cell_id + ) + data_output.data_entry_stress_period.append( + key + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + return 
data_found + + def remove(self): + """Removes this package from the simulation/model it is currently a + part of. + """ + self.model_or_sim.remove_package(self) + + def build_child_packages_container(self, pkg_type, filerecord): + """Builds a container object for any child packages. This method is + only intended for FloPy internal use.""" + # get package class + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + # create child package object + child_pkgs_name = f"utl{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is None and self.model_or_sim.model_type is None: + # simulation level object, try just the package type in the name + child_pkgs_name = f"{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is None: + # see if the package is part of one of the supported model types + for model_type in MFStructure().sim_struct.model_types: + child_pkgs_name = f"{model_type}{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is not None: + break + child_pkgs = child_pkgs_obj( + self.model_or_sim, self, pkg_type, filerecord, None, package_obj + ) + setattr(self, pkg_type, child_pkgs) + self._child_package_groups[pkg_type] = child_pkgs + + def _get_dfn_name_dict(self): + dfn_name_dict = {} + item_num = 0 + for item in self.structure.dfn_list: + if len(item) > 1: + item_name = item[1].split() + if len(item_name) > 1 and item_name[0] == "name": + dfn_name_dict[item_name[1]] = item_num + item_num += 1 + return dfn_name_dict + + def build_child_package(self, pkg_type, data, parameter_name, filerecord): + """Builds a child package. 
This method is only intended for FloPy + internal use.""" + if not hasattr(self, pkg_type): + self.build_child_packages_container(pkg_type, filerecord) + if data is not None: + package_group = getattr(self, pkg_type) + # build child package file name + child_path = package_group.next_default_file_path() + # create new empty child package + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + package = package_obj( + self, filename=child_path, child_builder_call=True + ) + assert hasattr(package, parameter_name) + + if isinstance(data, dict): + # order data correctly + dfn_name_dict = package._get_dfn_name_dict() + ordered_data_items = [] + for key, value in data.items(): + if key in dfn_name_dict: + ordered_data_items.append( + [dfn_name_dict[key], key, value] + ) + else: + ordered_data_items.append([999999, key, value]) + ordered_data_items = sorted( + ordered_data_items, key=lambda x: x[0] + ) + + # evaluate and add data to package + unused_data = {} + for order, key, value in ordered_data_items: + # if key is an attribute of the child package + if isinstance(key, str) and hasattr(package, key): + # set child package attribute + child_data_attr = getattr(package, key) + if isinstance(child_data_attr, mfdatalist.MFList): + child_data_attr.set_data(value, autofill=True) + elif isinstance(child_data_attr, mfdata.MFData): + child_data_attr.set_data(value) + elif key == "fname" or key == "filename": + child_path = value + package._filename = value + else: + setattr(package, key, value) + else: + unused_data[key] = value + if unused_data: + setattr(package, parameter_name, unused_data) + else: + setattr(package, parameter_name, data) + + # append package to list + package_group.init_package(package, child_path) + return package + + def build_mfdata(self, var_name, data=None): + """Returns the appropriate data type object (mfdatalist, mfdataarray, + or mfdatascalar) given that object the appropriate structure (looked + up based on var_name) and any data supplied. This method is for + internal FloPy library use only. + + Parameters + ---------- + var_name : str + Variable name + + data : many supported types + Data contained in this object + + Returns + ------- + data object : MFData subclass + + """ + if self.loading_package: + data = None + for key, block in self.structure.blocks.items(): + if var_name in block.data_structures: + if block.name not in self.blocks: + self.blocks[block.name] = MFBlock( + self.simulation_data, + self.dimensions, + block, + self.path + (key,), + self.model_or_sim, + self, + ) + dataset_struct = block.data_structures[var_name] + var_path = self.path + (key, var_name) + ds = self.blocks[block.name].add_dataset( + dataset_struct, data, var_path + ) + self._data_list.append(ds) + return ds + + message = 'Unable to find variable "{}" in package ' '"{}".'.format( + var_name, self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "building data objects", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + def set_model_relative_path(self, model_ws): + """Sets the model path relative to the simulation's path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. 
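+            For example, ``pkg.set_model_relative_path("gwf_1")`` (a
+            hypothetical relative path) updates the file paths stored in
+            every block and sub-package of this package.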
+ + """ + # update blocks + for key, block in self.blocks.items(): + block.set_model_relative_path(model_ws) + # update sub-packages + for package in self._package_container.packagelist: + package.set_model_relative_path(model_ws) + + def set_all_data_external( + self, + check_data=True, + external_data_folder=None, + base_name=None, + binary=False, + ): + """Sets the package's list and array data to be stored externally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + external_data_folder + Folder where external data will be stored + base_name: str + Base file name prefix for all files + binary: bool + Whether file will be stored as binary + """ + # set blocks + for key, block in self.blocks.items(): + file_name = os.path.split(self.filename)[1] + if base_name is not None: + file_name = f"{base_name}_{file_name}" + block.set_all_data_external( + file_name, + check_data, + external_data_folder, + binary, + ) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_external( + check_data, + external_data_folder, + base_name, + binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the package's list and array data to be stored internally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + + """ + # set blocks + for key, block in self.blocks.items(): + block.set_all_data_internal(check_data) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_internal(check_data) + + def load(self, strict=True): + """Loads the package from file. + + Parameters + ---------- + strict : bool + Enforce strict checking of data. + + Returns + ------- + success : bool + + """ + # open file + try: + fd_input_file = open( + datautil.clean_filename(self.get_file_path()), "r" + ) + except OSError as e: + if e.errno == errno.ENOENT: + message = "File {} of type {} could not be opened.".format( + self.get_file_path(), self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self.package_name, + self.path, + "loading package file", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + try: + self._load_blocks(fd_input_file, strict) + except ReadAsArraysException as err: + fd_input_file.close() + raise ReadAsArraysException(err) + # close file + fd_input_file.close() + + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # return validity of file + return self.is_valid() + + def is_valid(self): + """Returns whether or not this package is valid. 
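+
+        A package is valid when every required (non-optional) block is
+        enabled and every enabled block passes its own validity check.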
+ + Returns + ------- + is valid : bool + + """ + # Check blocks + for block in self.blocks.values(): + # Non-optional blocks must be enabled + if ( + block.structure.number_non_optional_data() > 0 + and not block.enabled + and block.is_allowed() + ): + self.last_error = ( + f'Required block "{block.block_header.name}" not enabled' + ) + return False + # Enabled blocks must be valid + if block.enabled and not block.is_valid: + self.last_error = f'Invalid block "{block.block_header.name}"' + return False + + return True + + def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): + # init + self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( + MFComment("", self.path, self.simulation_data) + ) + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + blocks_read = 0 + found_first_block = False + line = " " + while line != "": + line = fd_input_file.readline() + clean_line = line.strip() + # If comment or empty line + if MFComment.is_comment(clean_line, True): + self._store_comment(line, found_first_block) + elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": + # parse block header + try: + block_header_info = self._get_block_header_info( + line, self.path + ) + except MFDataException as mfde: + message = ( + "An error occurred while loading block header " + 'in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "loading block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + mfde, + ) + + # if there is more than one possible block with the same name, + # resolve the correct block to use + block_key = block_header_info.name.lower() + block_num = 1 + possible_key = f"{block_header_info.name.lower()}-{block_num}" + if possible_key in self.blocks: + block_key = possible_key + block_header_name = block_header_info.name.lower() + while ( + block_key in self.blocks + and not self.blocks[block_key].is_allowed() + ): + block_key = f"{block_header_name}-{block_num}" + block_num += 1 + + if block_key not in self.blocks: + # block name not recognized, load block as comments and + # issue a warning + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" is not a valid block ' + "name for file type " + "{}.".format(block_key, self.package_type) + ) + print(warning_str) + self._store_comment(line, found_first_block) + while line != "": + line = fd_input_file.readline() + self._store_comment(line, found_first_block) + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line and ( + len(arr_line[0]) <= 2 + or arr_line[0][:3].upper() == "END" + ): + break + else: + found_first_block = True + skip_block = False + cur_block = self.blocks[block_key] + if cur_block.loaded: + # Only blocks defined as repeating are allowed to have + # multiple entries + header_name = block_header_info.name + if not self.structure.blocks[ + header_name.lower() + ].repeating(): + # warn and skip block + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" has ' + "multiple entries and is not " + "intended to be a repeating " + "block ({} package" + ")".format(header_name, self.package_type) + ) + print(warning_str) + skip_block = True + bhs = cur_block.structure.block_header_structure + bhval = block_header_info.variable_strings + if 
( + len(bhs) > 0 + and len(bhval) > 0 + and bhs[0].name == "iper" + ): + nper = self.simulation_data.mfdata[ + ("tdis", "dimensions", "nper") + ].get_data() + bhval_int = datautil.DatumUtil.is_int(bhval[0]) + if not bhval_int or int(bhval[0]) > nper: + # skip block when block stress period is greater + # than nper + skip_block = True + + if not skip_block: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading block {cur_block.structure.name}..." + ) + # reset comments + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + cur_block.load( + block_header_info, fd_input_file, strict + ) + + # write post block comment + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + blocks_read += 1 + if blocks_read >= max_blocks: + break + else: + # treat skipped block as if it is all comments + arr_line = datautil.PyListUtil.split_data_line( + clean_line + ) + self.post_block_comments.add_text(str(line), True) + while arr_line and ( + len(line) <= 2 or arr_line[0][:3].upper() != "END" + ): + line = fd_input_file.readline() + arr_line = datautil.PyListUtil.split_data_line( + line.strip() + ) + if arr_line: + self.post_block_comments.add_text( + str(line), True + ) + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + else: + if not ( + len(clean_line) == 0 + or (len(line) > 2 and line[:3].upper() == "END") + ): + # Record file location of beginning of unresolved text + # treat unresolved text as a comment for now + self._store_comment(line, found_first_block) + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes the package to a file. + + Parameters + ---------- + ext_file_action : ExtFileAction + How to handle pathing of external data files. + """ + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # create any folders in path + package_file_path = self.get_file_path() + package_folder = os.path.split(package_file_path)[0] + if package_folder and not os.path.isdir(package_folder): + os.makedirs(os.path.split(package_file_path)[0]) + + # open file + fd = open(package_file_path, "w") + + # write flopy header + if self.simulation_data.write_headers: + dt = datetime.datetime.now() + header = ( + "# File generated by Flopy version {} on {} at {}." + "\n".format( + __version__, + dt.strftime("%m/%d/%Y"), + dt.strftime("%H:%M:%S"), + ) + ) + fd.write(header) + + # write blocks + self._write_blocks(fd, ext_file_action) + + fd.close() + + def create_package_dimensions(self): + """Creates a package dimensions object. For internal FloPy library + use. + + Returns + ------- + package dimensions : PackageDimensions + + """ + model_dims = None + if self.container_type[0] == PackageContainerType.model: + model_dims = [ + modeldimensions.ModelDimensions( + self.path[0], self.simulation_data + ) + ] + else: + # this is a simulation file that does not correspond to a specific + # model. 
figure out which model to use and return a dimensions + # object for that model + if self.dfn_file_name[0:3] == "exg": + exchange_rec_array = self.simulation_data.mfdata[ + ("nam", "exchanges", "exchanges") + ].get_data() + if exchange_rec_array is None: + return None + for exchange in exchange_rec_array: + if exchange[1].lower() == self._filename.lower(): + model_dims = [ + modeldimensions.ModelDimensions( + exchange[2], self.simulation_data + ), + modeldimensions.ModelDimensions( + exchange[3], self.simulation_data + ), + ] + break + elif ( + self.dfn_file_name[4:7] == "gnc" + and self.model_or_sim.type == "Simulation" + ): + # get exchange file name associated with gnc package + if self.parent_file is not None: + exg_file_name = self.parent_file.filename + else: + raise Exception( + "Can not create a simulation-level " + "gnc file without a corresponding " + "exchange file. Exchange file must be " + "created first." + ) + # get models associated with exchange file from sim nam file + try: + exchange_recarray_data = ( + self.model_or_sim.name_file.exchanges.get_data() + ) + except MFDataException as mfde: + message = ( + "An error occurred while retrieving exchange " + "data from the simulation name file. The error " + "occurred while processing gnc file " + f'"{self.filename}".' + ) + raise MFDataException( + mfdata_except=mfde, + package=self._get_pname(), + message=message, + ) + assert exchange_recarray_data is not None + model_1 = None + model_2 = None + for exchange in exchange_recarray_data: + if exchange[1] == exg_file_name: + model_1 = exchange[2] + model_2 = exchange[3] + + # assign models to gnc package + model_dims = [ + modeldimensions.ModelDimensions( + model_1, self.simulation_data + ), + modeldimensions.ModelDimensions( + model_2, self.simulation_data + ), + ] + elif self.parent_file is not None: + model_dims = [] + for md in self.parent_file.dimensions.model_dim: + model_name = md.model_name + model_dims.append( + modeldimensions.ModelDimensions( + model_name, self.simulation_data + ) + ) + else: + model_dims = [ + modeldimensions.ModelDimensions(None, self.simulation_data) + ] + return modeldimensions.PackageDimensions( + model_dims, self.structure, self.path + ) + + def _store_comment(self, line, found_first_block): + # Store comment + if found_first_block: + self.post_block_comments.text += line + else: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].text += line + + def _write_blocks(self, fd, ext_file_action): + # verify that all blocks are valid + if not self.is_valid(): + message = ( + 'Unable to write out model file "{}" due to the ' + "following error: " + "{} ({})".format(self._filename, self.last_error, self.path) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "writing package blocks", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + # write initial comments + pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) + if pkg_hdr_comments_path in self.simulation_data.mfdata: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].write(fd, False) + + # loop through blocks + block_num = 1 + for block in self.blocks.values(): + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" writing block {block.structure.name}...") + # write block + block.write(fd, ext_file_action=ext_file_action) + block_num += 1 + + def get_file_path(self): + 
"""Returns the package file's path. + + Returns + ------- + file path : str + """ + if self.path[0] in self.simulation_data.mfpath.model_relative_path: + return os.path.join( + self.simulation_data.mfpath.get_model_path(self.path[0]), + self._filename, + ) + else: + return os.path.join( + self.simulation_data.mfpath.get_sim_path(), self._filename + ) + + def export(self, f, **kwargs): + """ + Method to export a package to netcdf or shapefile based on the + extension of the file name (.shp for shapefile, .nc for netcdf) + + Parameters + ---------- + f : str + Filename + kwargs : keyword arguments + modelgrid : flopy.discretization.Grid instance + User supplied modelgrid which can be used for exporting + in lieu of the modelgrid associated with the model object + + Returns + ------- + None or Netcdf object + + """ + from .. import export + + return export.utils.package_export(f, self, **kwargs) + + def plot(self, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate + file names for output image files. Plots will be exported as + image files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only + used if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is + zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. 
+
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        if not self.plottable:
+            raise TypeError("Simulation level packages are not plottable")
+
+        axes = PlotUtilities._plot_package_helper(self, **kwargs)
+        return axes
+
+    @staticmethod
+    def add_netcdf_entries(
+        attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1
+    ):
+        """Adds NetCDF variable entries for a single array data item to
+        the attrs dictionary. For internal FloPy use."""
+        if auxiliary:
+            auxnames = auxiliary
+        else:
+            auxnames = []
+
+        def add_entry(tagname, iaux=None, layer=None):
+            key = tagname
+            name = f"{pname}"
+            if iaux is not None:
+                key = f"{key}/{iaux}"
+                name = f"{name}_{auxiliary[iaux]}"
+            else:
+                name = f"{name}_{tagname}"
+            if layer is not None:
+                key = f"{key}/layer{layer}"
+                name = f"{name}_l{layer}"
+
+            a = {}
+            a["varname"] = name.lower()
+            a["attrs"] = {}
+            a["attrs"]["modflow_input"] = (
+                f"{mname}/{pname}/{tagname}"
+            ).upper()
+            if iaux is not None:
+                a["attrs"]["modflow_iaux"] = iaux + 1
+            if layer is not None:
+                a["attrs"]["layer"] = layer
+            attrs[key] = a
+
+        if data_item.layered and mesh == "LAYERED":
+            # layered variables get one entry per layer (and per aux
+            # variable for aux data)
+            if data_item.name == "aux" or data_item.name == "auxvar":
+                for n, auxname in enumerate(auxnames):
+                    for l in range(nlay):
+                        add_entry(data_item.name, n, l + 1)
+            else:
+                for l in range(nlay):
+                    add_entry(data_item.name, layer=l + 1)
+        else:
+            if data_item.name == "aux" or data_item.name == "auxvar":
+                for n, auxname in enumerate(auxnames):
+                    add_entry(data_item.name, iaux=n)
+            else:
+                add_entry(data_item.name)
+
+    # TODO: filter the "auxiliary" keyword out of aux variable names
+    @staticmethod
+    def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1):
+        """Builds NetCDF attribute dictionaries for a model type (mtype)
+        and package type (ptype) without requiring a package instance."""
+        from .data.mfstructure import DfnPackage, MFSimulationStructure
+
+        attrs = {}
+        sim_struct = MFSimulationStructure()
+        # process package definitions, registering the one that matches
+        # the requested model and package type
+        for package in MFPackage.__subclasses__():
+            dfn_package = DfnPackage(package)
+            sim_struct.process_dfn(dfn_package)
+            component, subcomponent = (
+                dfn_package.dfn_file_name.split(".")[0].split("-")
+            )
+            if (
+                component == mtype.lower()
+                and subcomponent == ptype.lower()
+            ):
+                sim_struct.add_package(dfn_package, model_file=False)
+        pso = sim_struct.package_struct_objs[ptype.lower()]
+        for key, block in pso.blocks.items():
+            if key != "griddata" and key != "period":
+                continue
+            for d in block.data_structures:
+                if block.data_structures[d].netcdf:
+                    MFPackage.add_netcdf_entries(
+                        attrs,
+                        mtype,
+                        ptype,
+                        block.data_structures[d],
+                        auxiliary,
+                        mesh,
+                        nlay,
+                    )
+
+        return_attrs = {}
+        for k in list(attrs):
+            return_attrs[k] = attrs[k]["attrs"]
+
+        return return_attrs
+
+    def netcdf_info(self, mesh=None):
+        """Returns a dictionary describing the NetCDF variables associated
+        with this package's griddata and period array data."""
+        attrs = {}
f"{self.package_name}" + if iaux: + auxvar = self.dimensions.get_aux_variables()[0] + key = f"{key}/{iaux}" + name = f"{name}_{auxvar[iaux]}" + else: + name = f"{name}_{tagname}" + if layer: + key = f"{key}/layer{layer}" + name = f"{name}_l{layer}" + + a = {} + a["varname"] = name.lower() + a["attrs"] = {} + a["attrs"]["modflow_input"] = ( + f"{self.model_name}/{self.package_name}/{tagname}" + ).upper() + if iaux: + a["attrs"]["modflow_iaux"] = iaux + if layer: + a["attrs"]["layer"] = layer + attrs[key] = a + + def add_entries(name): + iaux = None + layer = None + if dataset.structure.layered and mesh == "LAYERED": + if name == "aux" or name == "auxvar": + for n, auxname in enumerate( + self.dimensions.get_aux_variables()[0] + ): + if auxname == "auxiliary" and n == 0: + continue + for l in range(self.model_or_sim.modelgrid.nlay): + add_entry(name, n, l + 1) + else: + for l in range(self.model_or_sim.modelgrid.nlay): + add_entry(name, layer=l + 1) + else: + if name == "aux" or name == "auxvar": + for n, auxname in enumerate( + self.dimensions.get_aux_variables()[0] + ): + if auxname == "auxiliary" and n == 0: + continue + add_entry(name, iaux=n) + else: + add_entry(name) + + if self.dimensions.get_aux_variables(): + auxnames = list(self.dimensions.get_aux_variables()[0]) + if len(auxnames) and auxnames[0] == "auxiliary": + auxnames.pop(0) + else: + auxnames = [] + + for key, block in self.blocks.items(): + if key != "griddata" and key != "period": + continue + for dataset in block.datasets.values(): + if isinstance(dataset, mfdataarray.MFArray): + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if ( + dataset.structure.netcdf and + dataset.has_data() + ): + #add_entries(data_item.name) + MFPackage.add_netcdf_entries( + attrs, + self.model_name, + self.package_name, + dataset.structure, + auxnames, + mesh, + self.model_or_sim.modelgrid.nlay, + ) + + return attrs + + +class MFChildPackages: + """ + Behind the scenes code for creating an interface to access child packages + from a parent package. This class is automatically constructed by the + FloPy library and is for internal library use only. + + Parameters + ---------- + """ + + def __init__( + self, + model_or_sim, + parent, + pkg_type, + filerecord, + package=None, + package_class=None, + ): + self._packages = [] + self._filerecord = filerecord + if package is not None: + self._packages.append(package) + self._model_or_sim = model_or_sim + self._cpparent = parent + self._pkg_type = pkg_type + self._package_class = package_class + + def __init_subclass__(cls): + """Register package""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __getattr__(self, attr): + if ( + "_packages" in self.__dict__ + and len(self._packages) > 0 + and hasattr(self._packages[0], attr) + ): + item = getattr(self._packages[0], attr) + return item + raise AttributeError(attr) + + def __getitem__(self, k): + if isinstance(k, int): + if k < len(self._packages): + return self._packages[k] + raise ValueError(f"Package index {k} does not exist.") + + def __setattr__(self, key, value): + if ( + key != "_packages" + and key != "_model_or_sim" + and key != "_cpparent" + and key != "_inattr" + and key != "_filerecord" + and key != "_package_class" + and key != "_pkg_type" + ): + if len(self._packages) == 0: + raise Exception( + "No {} package is currently attached to package" + " {}. 
Use the initialize method to create a(n) " + "{} package before attempting to access its " + "properties.".format( + self._pkg_type, self._cpparent.filename, self._pkg_type + ) + ) + package = self._packages[0] + setattr(package, key, value) + return + super().__setattr__(key, value) + + def __default_file_path_base(self, file_path, suffix=""): + stem = os.path.split(file_path)[1] + stem_lst = stem.split(".") + file_name = ".".join(stem_lst[:-1]) + if len(stem_lst) > 1: + file_ext = stem_lst[-1] + return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" + elif suffix != "": + return f"{stem}.{self._pkg_type}" + else: + return f"{stem}.{suffix}.{self._pkg_type}" + + def __file_path_taken(self, possible_path): + for package in self._packages: + # Do case insensitive compare + if package.filename.lower() == possible_path.lower(): + return True + return False + + def next_default_file_path(self): + possible_path = self.__default_file_path_base(self._cpparent.filename) + suffix = 0 + while self.__file_path_taken(possible_path): + possible_path = self.__default_file_path_base( + self._cpparent.filename, suffix + ) + suffix += 1 + return possible_path + + def init_package(self, package, fname, remove_packages=True): + if remove_packages: + # clear out existing packages + self._remove_packages() + elif fname is not None: + self._remove_packages(fname) + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + # check file record variable + found = False + fr_data = self._filerecord.get_data() + if fr_data is not None: + for line in fr_data: + if line[0] == fname: + found = True + if not found: + # append file record variable + self._filerecord.append_data([(fname,)]) + # add the package to the list + self._packages.append(package) + + def _update_filename(self, old_fname, new_fname): + file_record = self._filerecord.get_data() + new_file_record_data = [] + if file_record is not None: + file_record_data = file_record[0] + for item in file_record_data: + base, fname = os.path.split(item) + if fname.lower() == old_fname.lower(): + if base: + new_file_record_data.append( + (os.path.join(base, new_fname),) + ) + else: + new_file_record_data.append((new_fname,)) + else: + new_file_record_data.append((item,)) + else: + new_file_record_data.append((new_fname,)) + self._filerecord.set_data(new_file_record_data) + + def _append_package(self, package, fname, update_frecord=True): + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + + if update_frecord: + # set file record variable + file_record = self._filerecord.get_data() + file_record_data = file_record + new_file_record_data = [] + for item in file_record_data: + new_file_record_data.append((item[0],)) + new_file_record_data.append((fname,)) + self._filerecord.set_data(new_file_record_data) + + for existing_pkg in self._packages: + if existing_pkg is package: + # do not add the same package twice + return + # add the package to the list + self._packages.append(package) + + def _remove_packages(self, fname=None, only_pop_from_list=False): + rp_list = [] + for idx, package in enumerate(self._packages): + if fname is None or package.filename == fname: + if not only_pop_from_list: + self._model_or_sim.remove_package(package) + rp_list.append(idx) + for idx in reversed(rp_list): + self._packages.pop(idx) diff --git a/flopy/mf6/tmp/mfmodel.py b/flopy/mf6/tmp/mfmodel.py new file mode 100644 index 0000000000..4918b5e96b --- /dev/null +++ 
b/flopy/mf6/tmp/mfmodel.py @@ -0,0 +1,2229 @@ +import inspect +import os +import sys +import warnings +from typing import Optional, Union + +import numpy as np + +from ..discretization.grid import Grid +from ..discretization.modeltime import ModelTime +from ..discretization.structuredgrid import StructuredGrid +from ..discretization.unstructuredgrid import UnstructuredGrid +from ..discretization.vertexgrid import VertexGrid +from ..mbase import ModelInterface +from ..utils import datautil +from ..utils.check import mf6check +from .coordinates import modeldimensions +from .data import mfdata, mfdatalist, mfstructure +from .data.mfdatautil import DataSearchOutput, iterable +from .mfbase import ( + ExtFileAction, + FlopyException, + MFDataException, + MFFileMgmt, + PackageContainer, + PackageContainerType, + ReadAsArraysException, + VerbosityLevel, +) +from .mfpackage import MFPackage +from .utils.mfenums import DiscretizationType +from .utils.output_util import MF6Output + + +class MFModel(ModelInterface): + """ + MODFLOW-6 model base class. Represents a single model in a simulation. + + Parameters + ---------- + simulation_data : MFSimulationData + Simulation data object of the simulation this model will belong to + structure : MFModelStructure + Structure of this type of model + modelname : str + Name of the model + model_nam_file : str + Relative path to the model name file from model working folder + version : str + Version of modflow + exe_name : str + Model executable name + model_ws : str + Model working folder path + disfile : str + Relative path to dis file from model working folder + grid_type : str + Type of grid the model will use (structured, unstructured, vertices) + verbose : bool + Verbose setting for model operations (default False) + + Attributes + ---------- + name : str + Name of the model + exe_name : str + Model executable name + packages : dict of MFPackage + Dictionary of model packages + + """ + + def __init__( + self, + simulation, + model_type="gwf6", + modelname="model", + model_nam_file=None, + version="mf6", + exe_name="mf6", + add_to_simulation=True, + structure=None, + model_rel_path=".", + verbose=False, + **kwargs, + ): + self._package_container = PackageContainer(simulation.simulation_data) + self.simulation = simulation + self.simulation_data = simulation.simulation_data + self.name = modelname + self.name_file = None + self._version = version + self.model_type = model_type + self.type = "Model" + + if model_nam_file is None: + model_nam_file = f"{modelname}.nam" + + if add_to_simulation: + self.structure = simulation.register_model( + self, model_type, modelname, model_nam_file + ) + else: + self.structure = structure + self.set_model_relative_path(model_rel_path) + self.exe_name = exe_name + self.dimensions = modeldimensions.ModelDimensions( + self.name, self.simulation_data + ) + self.simulation_data.model_dimensions[modelname] = self.dimensions + self._ftype_num_dict = {} + self._package_paths = {} + self._verbose = verbose + + if model_nam_file is None: + self.model_nam_file = f"{modelname}.nam" + else: + self.model_nam_file = model_nam_file + + # check for spatial reference info in kwargs + xll = kwargs.pop("xll", None) + yll = kwargs.pop("yll", None) + self._xul = kwargs.pop("xul", None) + self._yul = kwargs.pop("yul", None) + rotation = kwargs.pop("rotation", 0.0) + crs = kwargs.pop("crs", None) + # build model grid object + self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation) + + self.start_datetime = None + # check for extraneous 
kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = ( + f'Extraneous kwargs "{kwargs_str}" provided to MFModel.' + ) + raise FlopyException(excpt_str) + + # build model name file + # create name file based on model type - support different model types + package_obj = PackageContainer.package_factory("nam", model_type[0:3]) + if not package_obj: + excpt_str = ( + f"Name file could not be found for model{model_type[0:3]}." + ) + raise FlopyException(excpt_str) + + self.name_file = package_obj( + self, + filename=self.model_nam_file, + pname=self.name, + _internal_package=True, + ) + + def __init_subclass__(cls): + """Register model type""" + super().__init_subclass__() + PackageContainer.modflow_models.append(cls) + PackageContainer.models_by_type[cls.model_type] = cls + + def __getattr__(self, item): + """ + __getattr__ - used to allow for getting packages as if they are + attributes + + Parameters + ---------- + item : str + 3 character package name (case insensitive) + + + Returns + ------- + pp : Package object + Package object of type :class:`flopy.pakbase.Package` + + """ + if item == "name_file" or not hasattr(self, "name_file"): + raise AttributeError(item) + + package = self.get_package(item) + if package is not None: + return package + raise AttributeError(item) + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.name, + package="", + ) + return + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + file_mgr = self.simulation_data.mfpath + data_str = ( + "name = {}\nmodel_type = {}\nversion = {}\nmodel_" + "relative_path = {}" + "\n\n".format( + self.name, + self.model_type, + self.version, + file_mgr.model_relative_path[self.name], + ) + ) + + for package in self.packagelist: + pk_str = package._get_data_str(formal, False) + if formal: + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + else: + pk_str = package._get_data_str(formal, False) + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + return data_str + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_dict(self): + """Returns a copy of the package name dictionary. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. 
deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + @property + def nper(self): + """Number of stress periods. + + Returns + ------- + nper : int + Number of stress periods in the simulation. + + """ + try: + return self.simulation.tdis.nper.array + except AttributeError: + return None + + @property + def modeltime(self): + """Model time discretization information. + + Returns + ------- + modeltime : ModelTime + FloPy object containing time discretization information for the + simulation. + + """ + tdis = self.simulation.get_package("tdis", type_only=True) + period_data = tdis.perioddata.get_data() + + # build steady state data + sto = self.get_package("sto", type_only=True) + if sto is None: + steady = np.full((len(period_data["perlen"])), True, dtype=bool) + else: + steady = np.full((len(period_data["perlen"])), False, dtype=bool) + ss_periods = sto.steady_state.get_active_key_dict() + for period, val in ss_periods.items(): + if val: + ss_periods[period] = sto.steady_state.get_data(period) + tr_periods = sto.transient.get_active_key_dict() + for period, val in tr_periods.items(): + if val: + tr_periods[period] = sto.transient.get_data(period) + if ss_periods: + last_ss_value = False + # loop through steady state array + for index, value in enumerate(steady): + # resolve if current index is steady state or transient + if index in ss_periods and ss_periods[index]: + last_ss_value = True + elif index in tr_periods and tr_periods[index]: + last_ss_value = False + if last_ss_value is True: + steady[index] = True + + # build model time + itmuni = tdis.time_units.get_data() + start_date_time = tdis.start_date_time.get_data() + + self._model_time = ModelTime( + perlen=period_data["perlen"], + nstp=period_data["nstp"], + tsmult=period_data["tsmult"], + time_units=itmuni, + start_datetime=start_date_time, + steady_state=steady + ) + return self._model_time + + @property + def modeldiscrit(self): + """Basic model spatial discretization information. This is used + internally prior to model spatial discretization information being + fully loaded. + + Returns + ------- + model grid : Grid subclass + FloPy object containing basic spatial discretization information + for the model. 
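+            Only basic dimensions are populated: a ``StructuredGrid`` from
+            nlay/nrow/ncol for DIS, a ``VertexGrid`` from ncpl and nlay for
+            DISV, and an ``UnstructuredGrid`` from the node count for DISU.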
+ + """ + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + return StructuredGrid( + nlay=dis.nlay.get_data(), + nrow=dis.nrow.get_data(), + ncol=dis.ncol.get_data(), + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + return VertexGrid( + ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data() + ) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + nodes = dis.nodes.get_data() + ncpl = np.array([nodes], dtype=int) + return UnstructuredGrid(ncpl=ncpl) + + @property + def modelgrid(self): + """Model spatial discretization information. + + Returns + ------- + model grid : Grid subclass + FloPy object containing spatial discretization information for the + model. + + """ + force_resync = False + if not self._mg_resync: + return self._modelgrid + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "delr"): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell2d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + if not hasattr(dis, "_init_complete"): + # disu package has not yet been fully initialized + return self._modelgrid + + # check to see if ncpl can be constructed from ihc array, + # otherwise set ncpl equal to [nodes] + ihc = dis.ihc.array + iac = dis.iac.array + ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) + if ncpl is None: + ncpl = np.array([dis.nodes.get_data()], dtype=int) + cell2d = dis.cell2d.array + idomain = dis.idomain.array + if idomain is None: + idomain = np.ones(dis.nodes.array, dtype=int) + if cell2d is None: + if ( + 
self.simulation.simulation_data.verbosity_level.value
+                    >= VerbosityLevel.normal.value
+                ):
+                    print(
+                        "WARNING: cell2d information missing. Functionality of "
+                        "the UnstructuredGrid will be limited."
+                    )
+
+            vertices = dis.vertices.array
+            if vertices is None:
+                if (
+                    self.simulation.simulation_data.verbosity_level.value
+                    >= VerbosityLevel.normal.value
+                ):
+                    print(
+                        "WARNING: vertices information missing. Functionality "
+                        "of the UnstructuredGrid will be limited."
+                    )
+                vertices = None
+            else:
+                vertices = np.array(vertices)
+
+            self._modelgrid = UnstructuredGrid(
+                vertices=vertices,
+                cell2d=cell2d,
+                top=dis.top.array,
+                botm=dis.bot.array,
+                idomain=idomain,
+                lenuni=dis.length_units.array,
+                ncpl=ncpl,
+                crs=self._modelgrid.crs,
+                xoff=self._modelgrid.xoffset,
+                yoff=self._modelgrid.yoffset,
+                angrot=self._modelgrid.angrot,
+                iac=dis.iac.array,
+                ja=dis.ja.array,
+            )
+        elif self.get_grid_type() == DiscretizationType.DISV1D:
+            dis = self.get_package("disv1d")
+            if not hasattr(dis, "_init_complete"):
+                if not hasattr(dis, "cell1d"):
+                    # disv1d package has not yet been initialized
+                    return self._modelgrid
+                else:
+                    # disv1d package has been partially initialized
+                    self._modelgrid = VertexGrid(
+                        vertices=dis.vertices.array,
+                        cell1d=dis.cell1d.array,
+                        top=None,
+                        botm=None,
+                        idomain=None,
+                        lenuni=None,
+                        crs=self._modelgrid.crs,
+                        xoff=self._modelgrid.xoffset,
+                        yoff=self._modelgrid.yoffset,
+                        angrot=self._modelgrid.angrot,
+                    )
+            else:
+                botm = dis.bottom.array
+                idomain = dis.idomain.array
+                if idomain is None:
+                    force_resync = True
+                idomain = self._resolve_idomain(idomain, botm)
+                self._modelgrid = VertexGrid(
+                    vertices=dis.vertices.array,
+                    cell1d=dis.cell1d.array,
+                    top=None,
+                    botm=botm,
+                    idomain=idomain,
+                    lenuni=dis.length_units.array,
+                    crs=self._modelgrid.crs,
+                    xoff=self._modelgrid.xoffset,
+                    yoff=self._modelgrid.yoffset,
+                    angrot=self._modelgrid.angrot,
+                )
+        elif self.get_grid_type() == DiscretizationType.DIS2D:
+            dis = self.get_package("dis2d")
+            if not hasattr(dis, "_init_complete"):
+                if not hasattr(dis, "delr"):
+                    # dis2d package has not yet been initialized
+                    return self._modelgrid
+                else:
+                    # dis2d package has been partially initialized
+                    self._modelgrid = StructuredGrid(
+                        delc=dis.delc.array,
+                        delr=dis.delr.array,
+                        top=None,
+                        botm=None,
+                        idomain=None,
+                        lenuni=None,
+                        crs=self._modelgrid.crs,
+                        xoff=self._modelgrid.xoffset,
+                        yoff=self._modelgrid.yoffset,
+                        angrot=self._modelgrid.angrot,
+                    )
+            else:
+                botm = dis.bottom.array
+                idomain = dis.idomain.array
+                if idomain is None:
+                    force_resync = True
+                idomain = self._resolve_idomain(idomain, botm)
+                self._modelgrid = StructuredGrid(
+                    delc=dis.delc.array,
+                    delr=dis.delr.array,
+                    top=None,
+                    botm=botm,
+                    idomain=idomain,
+                    lenuni=dis.length_units.array,
+                    crs=self._modelgrid.crs,
+                    xoff=self._modelgrid.xoffset,
+                    yoff=self._modelgrid.yoffset,
+                    angrot=self._modelgrid.angrot,
+                )
+        elif self.get_grid_type() == DiscretizationType.DISV2D:
+            dis = self.get_package("disv2d")
+            if not hasattr(dis, "_init_complete"):
+                if not hasattr(dis, "cell2d"):
+                    # disv2d package has not yet been initialized
+                    return self._modelgrid
+                else:
+                    # disv2d package has been partially initialized
+                    self._modelgrid = VertexGrid(
+                        vertices=dis.vertices.array,
+                        cell2d=dis.cell2d.array,
+                        top=None,
+                        botm=None,
+                        idomain=None,
+                        lenuni=None,
+                        crs=self._modelgrid.crs,
+                        xoff=self._modelgrid.xoffset,
+                        yoff=self._modelgrid.yoffset,
+                        angrot=self._modelgrid.angrot,
+                    )
+            else:
+                botm = dis.bottom.array
+                idomain = dis.idomain.array
+                if idomain
is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + return self._modelgrid + + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() + + # resolve offsets + if xorig is None: + xorig = self._modelgrid.xoffset + if xorig is None: + if self._xul is not None: + xorig = self._modelgrid._xul_to_xll(self._xul) + else: + xorig = 0.0 + if yorig is None: + yorig = self._modelgrid.yoffset + if yorig is None: + if self._yul is not None: + yorig = self._modelgrid._yul_to_yll(self._yul) + else: + yorig = 0.0 + if angrot is None: + angrot = self._modelgrid.angrot + self._modelgrid.set_coord_info( + xorig, + yorig, + angrot, + self._modelgrid.crs, + ) + self._mg_resync = not self._modelgrid.is_complete or force_resync + return self._modelgrid + + @property + def packagelist(self): + """List of model packages.""" + return self._package_container.packagelist + + @property + def namefile(self): + """Model namefile object.""" + return self.model_nam_file + + @property + def model_ws(self): + """Model file path.""" + file_mgr = self.simulation_data.mfpath + return file_mgr.get_model_path(self.name) + + @property + def exename(self): + """MODFLOW executable name""" + return self.exe_name + + @property + def version(self): + """Version of MODFLOW""" + return self._version + + @property + def solver_tols(self): + """Returns the solver inner hclose and rclose values. + + Returns + ------- + inner_hclose, rclose : float, float + + """ + ims = self.get_ims_package() + if ims is not None: + rclose = ims.rcloserecord.get_data() + if rclose is not None: + rclose = rclose[0][0] + return ims.inner_hclose.get_data(), rclose + return None + + @property + def laytyp(self): + """Layering type""" + try: + return self.npf.icelltype.array + except AttributeError: + return None + + @property + def hdry(self): + """Dry cell value""" + return -1e30 + + @property + def hnoflo(self): + """No-flow cell value""" + return 1e30 + + @property + def laycbd(self): + """Quasi-3D confining bed. Not supported in MODFLOW-6. + + Returns + ------- + None : None + + """ + return None + + @property + def output(self): + budgetkey = None + if self.model_type == "gwt6": + budgetkey = "MASS BUDGET FOR ENTIRE MODEL" + try: + return MF6Output(self.oc, budgetkey=budgetkey) + except AttributeError: + return MF6Output(self, budgetkey=budgetkey) + + def export(self, f, **kwargs): + """Method to export a model to a shapefile or netcdf file + + Parameters + ---------- + f : str + File name (".nc" for netcdf or ".shp" for shapefile) + or dictionary of .... 
+
+        **kwargs : keyword arguments
+            modelgrid: flopy.discretization.Grid
+                User supplied modelgrid object which will supersede the
+                built-in modelgrid object
+            if fmt is set to 'vtk', parameters of Vtk initializer
+
+        """
+        from ..export import utils
+
+        return utils.model_export(f, self, **kwargs)
+
+    def netcdf_attrs(self, mesh=None):
+        """Return a dictionary of dataset (model) scoped attributes.
+
+        Parameters
+        ----------
+        mesh : str
+            mesh type if dataset is ugrid compliant
+        """
+        attrs = {
+            "modflow_grid": "",
+            "modflow_model": "",
+        }
+        if self.get_grid_type() == DiscretizationType.DIS:
+            attrs["modflow_grid"] = "STRUCTURED"
+        elif self.get_grid_type() == DiscretizationType.DISV:
+            attrs["modflow_grid"] = "VERTEX"
+
+        attrs["modflow_model"] = (
+            f"{self.name.upper()}: MODFLOW 6 "
+            f"{self.model_type.upper()[0:3]} model"
+        )
+
+        # currently the only supported mesh type is LAYERED
+        if mesh:
+            attrs["mesh"] = mesh
+
+        return attrs
+
+    @property
+    def verbose(self):
+        """Verbose setting for model operations (True/False)"""
+        return self._verbose
+
+    @verbose.setter
+    def verbose(self, verbose):
+        """Verbose setting for model operations (True/False)"""
+        self._verbose = verbose
+
+    def check(self, f=None, verbose=True, level=1):
+        """
+        Check model data for common errors.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        success : bool
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> sim = flopy.mf6.MFSimulation.load(sim_ws=".")
+        >>> m = sim.get_model()
+        >>> m.check()
+        """
+
+        # check instance for model-level check
+        chk = mf6check(self, f=f, verbose=verbose, level=level)
+
+        return self._check(chk, level)
+
+    @staticmethod
+    def load_base(
+        cls_child,
+        simulation,
+        structure,
+        modelname="NewModel",
+        model_nam_file="modflowtest.nam",
+        mtype="gwf",
+        version="mf6",
+        exe_name: Union[str, os.PathLike] = "mf6",
+        strict=True,
+        model_rel_path=os.curdir,
+        load_only=None,
+    ):
+        """
+        Class method that loads an existing model.
+
+        Parameters
+        ----------
+        cls_child : type
+            model class to instantiate, e.g. ModflowGwf
+        simulation : MFSimulation
+            simulation object that this model is a part of
+        structure : MFModelStructure
+            structure of this type of model
+        modelname : str
+            name of the model
+        model_nam_file : str
+            relative path to the model name file from model working folder
+        mtype : str
+            model type, e.g. "gwf"
+        version : str
+            version of modflow
+        exe_name : str or PathLike
+            model executable name or path
+        strict : bool
+            strict mode when loading files
+        model_rel_path : str
+            relative path of model folder to simulation folder
+        load_only : list
+            list of package abbreviations or package names corresponding to
+            packages that flopy will load. default is None, which loads all
+            packages. the discretization packages will load regardless of
+            this setting. subpackages, like time series and observations,
+            will also load regardless of this setting.
+            example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1']
+
+        Returns
+        -------
+        model : MFModel
+
+        Examples
+        --------
+        """
+        instance = cls_child(
+            simulation,
+            modelname,
+            model_nam_file=model_nam_file,
+            version=version,
+            exe_name=exe_name,
+            add_to_simulation=False,
+            structure=structure,
+            model_rel_path=model_rel_path,
+        )
+
+        # build case consistent load_only dictionary for quick lookups
+        load_only = PackageContainer._load_only_dict(load_only)
+
+        # load name file
+        instance.name_file.load(strict)
+
+        # order packages
+        vnum = mfstructure.MFStructure().get_version_string()
+        # FIX: Transport - Priority packages maybe should not be hard coded
+        priority_packages = {
+            f"dis{vnum}": 1,
+            f"disv{vnum}": 1,
+            f"disu{vnum}": 1,
+        }
+        packages_ordered = []
+        package_recarray = instance.simulation_data.mfdata[
+            (modelname, "nam", "packages", "packages")
+        ]
+        if package_recarray.array is None:
+            return instance
+
+        for item in package_recarray.get_data():
+            if item[0] in priority_packages:
+                packages_ordered.insert(0, (item[0], item[1], item[2]))
+            else:
+                packages_ordered.append((item[0], item[1], item[2]))
+
+        # load packages
+        sim_struct = mfstructure.MFStructure().sim_struct
+        instance._ftype_num_dict = {}
+        for ftype, fname, pname in packages_ordered:
+            ftype_orig = ftype
+            ftype = ftype[0:-1].lower()
+            if (
+                ftype in structure.package_struct_objs
+                or ftype in sim_struct.utl_struct_objs
+            ):
+                if (
+                    load_only is not None
+                    and not PackageContainer._in_pkg_list(
+                        priority_packages, ftype_orig, pname
+                    )
+                    and not PackageContainer._in_pkg_list(
+                        load_only, ftype_orig, pname
+                    )
+                ):
+                    if (
+                        simulation.simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        print(f"    skipping package {ftype}...")
+                    continue
+                if model_rel_path and model_rel_path != ".":
+                    # strip off model relative path from the file path
+                    filemgr = simulation.simulation_data.mfpath
+                    fname = filemgr.strip_model_relative_path(modelname, fname)
+                if (
+                    simulation.simulation_data.verbosity_level.value
+                    >= VerbosityLevel.normal.value
+                ):
+                    print(f"    loading package {ftype}...")
+                # load package
+                instance.load_package(ftype, fname, pname, strict, None)
+                sim_data = simulation.simulation_data
+                if ftype == "dis" and not sim_data.max_columns_user_set:
+                    # set column wrap to ncol
+                    dis = instance.get_package("dis", type_only=True)
+                    if dis is not None and hasattr(dis, "ncol"):
+                        sim_data.max_columns_of_data = dis.ncol.get_data()
+                        sim_data.max_columns_user_set = False
+                        sim_data.max_columns_auto_set = True
+        # load referenced packages
+        if modelname in instance.simulation_data.referenced_files:
+            for ref_file in instance.simulation_data.referenced_files[
+                modelname
+            ].values():
+                if (
+                    ref_file.file_type in structure.package_struct_objs
+                    or ref_file.file_type in sim_struct.utl_struct_objs
+                ) and not ref_file.loaded:
+                    instance.load_package(
+                        ref_file.file_type,
+                        ref_file.file_name,
+                        None,
+                        strict,
+                        ref_file.reference_path,
+                    )
+                    ref_file.loaded = True
+
+        # TODO: fix jagged lists where appropriate
+
+        return instance
+
+    def inspect_cells(
+        self,
+        cell_list,
+        stress_period=None,
+        output_file_path=None,
+        inspect_budget=True,
+        inspect_dependent_var=True,
+    ):
+        """
+        Inspect model cells. Returns model data associated with cells.
+
+        Parameters
+        ----------
+        cell_list : list of tuples
+            List of model cells. Each model cell is a tuple of integers.
+            ex: [(1,1,1), (2,4,3)]
+        stress_period : int
+            For transient data, only return data from this stress period.
If
+            not specified or None, all stress period data will be returned.
+        output_file_path : str
+            Path to output file that will contain the inspection results
+        inspect_budget : bool
+            Inspect budget file
+        inspect_dependent_var : bool
+            Inspect dependent variable (head or concentration) file
+
+        Returns
+        -------
+        output : dict
+            Dictionary containing inspection results
+
+        Examples
+        --------
+
+        >>> import os
+        >>> import flopy
+        >>> sim = flopy.mf6.MFSimulation.load("name", "mf6", "mf6", ".")
+        >>> model = sim.get_model()
+        >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)]
+        >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv")
+        >>> model.inspect_cells(inspect_list, output_file_path=out_file)
+        """
+        # handle no cell case
+        if cell_list is None or len(cell_list) == 0:
+            return None
+
+        output_by_package = {}
+        # loop through all packages
+        for pp in self.packagelist:
+            # call the package's "inspect_cells" method
+            package_output = pp.inspect_cells(cell_list, stress_period)
+            if len(package_output) > 0:
+                output_by_package[f"{pp.package_name} package"] = (
+                    package_output
+                )
+        # get dependent variable
+        if inspect_dependent_var:
+            try:
+                if self.model_type == "gwf6":
+                    heads = self.output.head()
+                    name = "heads"
+                elif self.model_type == "gwt6":
+                    heads = self.output.concentration()
+                    name = "concentration"
+                else:
+                    inspect_dependent_var = False
+            except Exception:
+                inspect_dependent_var = False
+        if inspect_dependent_var and heads is not None:
+            kstp_kper_lst = heads.get_kstpkper()
+            data_output = DataSearchOutput((name,))
+            data_output.output = True
+            for kstp_kper in kstp_kper_lst:
+                if stress_period is not None and stress_period != kstp_kper[1]:
+                    continue
+                head_array = np.array(heads.get_data(kstpkper=kstp_kper))
+                # flatten output data in disv and disu cases
+                if len(cell_list[0]) == 2:
+                    head_array = head_array[0, :, :]
+                elif len(cell_list[0]) == 1:
+                    head_array = head_array[0, 0, :]
+                # find data matches
+                self.match_array_cells(
+                    cell_list,
+                    head_array.shape,
+                    head_array,
+                    kstp_kper,
+                    data_output,
+                )
+            if len(data_output.data_entries) > 0:
+                output_by_package[f"{name} output"] = [data_output]
+
+        # get model dimensions
+        model_shape = self.modelgrid.shape
+
+        # get budgets
+        if inspect_budget:
+            try:
+                bud = self.output.budget()
+            except Exception:
+                inspect_budget = False
+        if inspect_budget and bud is not None:
+            kstp_kper_lst = bud.get_kstpkper()
+            rec_names = bud.get_unique_record_names()
+            budget_matches = []
+            for rec_name in rec_names:
+                # clean up binary string name
+                string_name = str(rec_name)[3:-1].strip()
+                data_output = DataSearchOutput((string_name,))
+                data_output.output = True
+                for kstp_kper in kstp_kper_lst:
+                    if (
+                        stress_period is not None
+                        and stress_period != kstp_kper[1]
+                    ):
+                        continue
+                    budget_array = np.array(
+                        bud.get_data(
+                            kstpkper=kstp_kper,
+                            text=rec_name,
+                            full3D=True,
+                        )[0]
+                    )
+                    if len(budget_array.shape) == 4:
+                        # get rid of 4th "time" dimension
+                        budget_array = budget_array[0, :, :, :]
+                    # flatten output data in disv and disu cases
+                    if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3:
+                        budget_array = budget_array[0, :, :]
+                    elif (
+                        len(cell_list[0]) == 1 and len(budget_array.shape) >= 2
+                    ):
+                        budget_array = budget_array[0, :]
+                    # find data matches
+                    if budget_array.shape != model_shape:
+                        # no support yet for different shaped budgets like
+                        # flow_ja_face
+                        continue
+
+                    self.match_array_cells(
+                        cell_list,
+                        budget_array.shape,
+                        budget_array,
+                        kstp_kper,
+                        data_output,
+                    )
+                if len(data_output.data_entries) > 0:
+                    budget_matches.append(data_output)
+            if
len(budget_matches) > 0: + output_by_package["budget output"] = budget_matches + + if len(output_by_package) > 0 and output_file_path is not None: + with open(output_file_path, "w") as fd: + # write document header + fd.write(f"Inspect cell results for model {self.name}\n") + output = [] + for cell in cell_list: + output.append(" ".join([str(i) for i in cell])) + output = ",".join(output) + fd.write(f"Model cells inspected,{output}\n\n") + + for package_name, matches in output_by_package.items(): + fd.write(f"Results from {package_name}\n") + for search_output in matches: + # write header line with data name + fd.write( + f",Results from " + f"{search_output.path_to_data[-1]}\n" + ) + # write data header + if search_output.transient: + if search_output.output: + fd.write(",stress_period,time_step") + else: + fd.write(",stress_period/key") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + fd.write(",cellid") + h_columns = ",".join(search_output.data_header) + fd.write(f",{h_columns}\n") + else: + fd.write(",cellid,data\n") + # write data found + for index, data_entry in enumerate( + search_output.data_entries + ): + if search_output.transient: + sp = search_output.data_entry_stress_period[ + index + ] + if search_output.output: + fd.write(f",{sp[1]},{sp[0]}") + else: + fd.write(f",{sp}") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + cells = search_output.data_entry_cellids[ + index + ] + output = " ".join([str(i) for i in cells]) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + else: + output = " ".join( + [ + str(i) + for i in search_output.data_entry_ids[ + index + ] + ] + ) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + fd.write("\n") + return output_by_package + + def match_array_cells( + self, cell_list, data_shape, array_data, key, data_output + ): + # loop through list of cells we are searching for + for cell in cell_list: + if len(data_shape) == 3 or data_shape[0] == "nodes": + # data is by cell + if array_data.ndim == 3 and len(cell) == 3: + data_output.data_entries.append( + array_data[cell[0], cell[1], cell[2]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append( + array_data[cell[0], cell[1]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: CellID "{}" not same ' + "number of dimensions as data " + "{}.".format(cell, data_output.path_to_data) + ) + print(warning_str) + elif len(data_shape) == 2: + # get data based on ncpl/lay + if array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append( + array_data[cell[0], cell[1]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif len(data_shape) == 1: + # get data based on nodes + if len(cell) == 1 and array_data.ndim == 1: + 
data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + + @staticmethod + def _format_data_entry(data_entry): + output = "" + if iterable(data_entry, True): + for item in data_entry: + if isinstance(item, tuple): + formatted = " ".join([str(i) for i in item]) + output = f"{output},{formatted}" + else: + output = f"{output},{item}" + return f"{output}\n" + else: + return f",{data_entry}\n" + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """ + Writes out model's package files. + + Parameters + ---------- + ext_file_action : ExtFileAction + Defines what to do with external files when the simulation path has + changed. defaults to copy_relative_paths which copies only files + with relative paths, leaving files defined by absolute paths fixed. + + """ + + # write name file + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print(" writing model name file...") + + self.name_file.write(ext_file_action=ext_file_action) + + if not self.simulation_data.max_columns_user_set: + grid_type = self.get_grid_type() + if grid_type == DiscretizationType.DIS: + self.simulation_data.max_columns_of_data = self.dis.ncol.get_data() + self.simulation_data.max_columns_user_set = False + self.simulation_data.max_columns_auto_set = True + + # write packages + for pp in self.packagelist: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print(f" writing package {pp._get_pname()}...") + pp.write(ext_file_action=ext_file_action) + + def get_grid_type(self): + """ + Return the type of grid used by model 'model_name' in simulation + containing simulation data 'simulation_data'. + + Returns + ------- + grid type : DiscretizationType + """ + package_recarray = self.name_file.packages + structure = mfstructure.MFStructure() + if ( + package_recarray.search_data( + f"dis{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DIS + elif ( + package_recarray.search_data( + f"disv{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DISV + elif ( + package_recarray.search_data( + f"disu{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DISU + elif ( + package_recarray.search_data( + f"disv1d{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DISV1D + elif ( + package_recarray.search_data( + f"dis2d{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DIS2D + elif ( + package_recarray.search_data( + f"disv2d{structure.get_version_string()}", 0 + ) + is not None + ): + return DiscretizationType.DISV2D + + return DiscretizationType.UNDEFINED + + def get_ims_package(self): + """Get the IMS package associated with this model. + + Returns + ------- + IMS package : ModflowIms + """ + solution_group = self.simulation.name_file.solutiongroup.get_data(0) + for record in solution_group: + for name in record.dtype.names: + if name == "slntype" or name == "slnfname": + continue + if record[name] == self.name: + return self.simulation.get_solution_package( + record.slnfname + ) + return None + + def get_steadystate_list(self): + """Returns a list of stress periods that are steady state. 
+
+        A stress period is reported as steady state unless the storage (STO)
+        package marks it, or a preceding period, transient without an
+        intervening steady-state period.
+
+        Returns
+        -------
+        steady state list : list
+
+        """
+        tdis = self.simulation.get_package("tdis")
+        period_data = tdis.perioddata.get_data()
+        # assume all stress periods are steady state until the storage
+        # package indicates otherwise
+        ss_list = [True] * len(period_data)
+
+        storage = self.get_package("sto", type_only=True)
+        if storage is not None:
+            tr_keys = storage.transient.get_keys(True)
+            ss_keys = storage.steady_state.get_keys(True)
+            for key in tr_keys:
+                # a transient setting stays in effect for subsequent stress
+                # periods until a steady-state setting is encountered
+                ss_list[key] = False
+                for ss_list_key in range(key + 1, len(ss_list)):
+                    if ss_list_key in ss_keys:
+                        break
+                    ss_list[ss_list_key] = False
+        return ss_list
+
+    def is_valid(self):
+        """
+        Checks the validity of the model and all of its packages
+
+        Returns
+        -------
+        valid : bool
+
+        """
+
+        # valid name file
+        if not self.name_file.is_valid():
+            return False
+
+        # valid packages
+        for pp in self.packagelist:
+            if not pp.is_valid():
+                return False
+
+        # required packages exist
+        for package_struct in self.structure.package_struct_objs.values():
+            if (
+                not package_struct.optional
+                and package_struct.file_type
+                not in self._package_container.package_type_dict
+            ):
+                return False
+
+        return True
+
+    def set_model_relative_path(self, model_ws):
+        """
+        Sets the file path to the model folder relative to the simulation
+        folder and updates all model file paths, placing them in the model
+        folder.
+
+        Parameters
+        ----------
+        model_ws : str
+            Model working folder relative to simulation working folder
+
+        """
+        # set all data internal
+        self.set_all_data_internal(False)
+
+        # update path in the file manager
+        file_mgr = self.simulation_data.mfpath
+        file_mgr.set_last_accessed_model_path()
+        path = model_ws
+        file_mgr.model_relative_path[self.name] = path
+
+        if (
+            model_ws
+            and model_ws != "."
+ and self.simulation.name_file is not None + ): + model_folder_path = file_mgr.get_model_path(self.name) + if not os.path.exists(model_folder_path): + # make new model folder + os.makedirs(model_folder_path) + # update model name file location in simulation name file + models = self.simulation.name_file.models + models_data = models.get_data() + for index, entry in enumerate(models_data): + old_model_file_name = os.path.split(entry[1])[1] + old_model_base_name = os.path.splitext(old_model_file_name)[0] + if ( + old_model_base_name.lower() == self.name.lower() + or self.name == entry[2] + ): + models_data[index][1] = os.path.join( + path, old_model_file_name + ) + break + models.set_data(models_data) + + if self.name_file is not None: + # update listing file location in model name file + list_file = self.name_file.list.get_data() + if list_file: + path, list_file_name = os.path.split(list_file) + try: + self.name_file.list.set_data( + os.path.join(path, list_file_name) + ) + except MFDataException as mfde: + message = ( + "Error occurred while setting relative " + 'path "{}" in model ' + '"{}".'.format( + os.path.join(path, list_file_name), self.name + ) + ) + raise MFDataException( + mfdata_except=mfde, + model=self.model_name, + package=self.name_file._get_pname(), + message=message, + ) + # update package file locations in model name file + packages = self.name_file.packages + packages_data = packages.get_data() + if packages_data is not None: + for index, entry in enumerate(packages_data): + # get package object associated with entry + package = None + if len(entry) >= 3: + package = self.get_package(entry[2]) + if package is None: + package = self.get_package(entry[0]) + if package is not None: + # combine model relative path with package path + packages_data[index][1] = os.path.join( + path, package.filename + ) + else: + # package not found, create path based on + # information in name file + old_package_name = os.path.split(entry[1])[-1] + packages_data[index][1] = os.path.join( + path, old_package_name + ) + packages.set_data(packages_data) + # update files referenced from within packages + for package in self.packagelist: + package.set_model_relative_path(model_ws) + + def _remove_package_from_dictionaries(self, package): + # remove package from local dictionaries and lists + if package.path in self._package_paths: + del self._package_paths[package.path] + self._package_container.remove_package(package) + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. + type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def remove_package(self, package_name): + """ + Removes package and all child packages from the model. + `package_name` can be the package's name, type, or package object to + be removed from the model. + + Parameters + ---------- + package_name : str + Package name, package type, or package object to be removed from + the model. 
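+
+        Examples
+        --------
+        A minimal usage sketch; assumes ``gwf`` is a model object with a
+        package of type "chd" loaded (the package type is illustrative):
+
+        >>> gwf.remove_package("chd")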
+
+        """
+        if isinstance(package_name, MFPackage):
+            packages = [package_name]
+        else:
+            packages = self.get_package(package_name)
+            if not isinstance(packages, list) and packages is not None:
+                packages = [packages]
+        if packages is None:
+            return
+        for package in packages:
+            if package.model_or_sim.name != self.name:
+                except_text = (
+                    "Package cannot be removed from model "
+                    f"{self.model_name} since it is not part of it."
+                )
+                raise mfstructure.FlopyException(except_text)
+
+            self._remove_package_from_dictionaries(package)
+
+            try:
+                # remove package from name file
+                package_data = self.name_file.packages.get_data()
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while reading package names "
+                    "from name file in model "
+                    f'"{self.name}"'
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+            try:
+                new_rec_array = None
+                for item in package_data:
+                    filename = os.path.basename(item[1])
+                    if filename != package.filename:
+                        if new_rec_array is None:
+                            new_rec_array = np.rec.array(
+                                [item.tolist()], package_data.dtype
+                            )
+                        else:
+                            new_rec_array = np.hstack((item, new_rec_array))
+            except:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    self.structure.get_model(),
+                    self.structure.get_package(),
+                    self._path,
+                    "building package recarray",
+                    self.structure.name,
+                    inspect.stack()[0][3],
+                    type_,
+                    value_,
+                    traceback_,
+                    None,
+                    self.simulation_data.debug,
+                )
+            try:
+                self.name_file.packages.set_data(new_rec_array)
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while setting package names "
+                    f'from name file in model "{self.name}". Package name '
+                    f"data:\n{new_rec_array}"
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+
+            # build list of child packages
+            child_package_list = []
+            for pkg in self.packagelist:
+                if (
+                    pkg.parent_file is not None
+                    and pkg.parent_file.path == package.path
+                ):
+                    child_package_list.append(pkg)
+            # remove child packages
+            for child_package in child_package_list:
+                self._remove_package_from_dictionaries(child_package)
+
+    def update_package_filename(self, package, new_name):
+        """
+        Updates the filename for a package. For internal flopy use only.
+
+        Parameters
+        ----------
+        package : MFPackage
+            Package object
+        new_name : str
+            New package name
+        """
+        try:
+            # get namefile package data
+            package_data = self.name_file.packages.get_data()
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                "from name file in model "
+                f'"{self.name}".'
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+        try:
+            file_mgr = self.simulation_data.mfpath
+            model_rel_path = file_mgr.model_relative_path[self.name]
+            # update namefile package data with new name
+            new_rec_array = None
+            old_leaf = os.path.split(package.filename)[1]
+            for item in package_data:
+                leaf = os.path.split(item[1])[1]
+                if leaf == old_leaf:
+                    item[1] = os.path.join(model_rel_path, new_name)
+
+                if new_rec_array is None:
+                    new_rec_array = np.rec.array(
+                        [item.tolist()], package_data.dtype
+                    )
+                else:
+                    new_rec_array = np.hstack((item, new_rec_array))
+        except:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.structure.get_model(),
+                self.structure.get_package(),
+                self._path,
+                "updating package filename",
+                self.structure.name,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                None,
+                self.simulation_data.debug,
+            )
+        try:
+            self.name_file.packages.set_data(new_rec_array)
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                f'from name file in model "{self.name}". Package name '
+                f"data:\n{new_rec_array}"
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+
+    def rename_all_packages(self, name):
+        """Renames all package files in the model.
+
+        Parameters
+        ----------
+        name : str
+            Prefix of package names. Package files will be named
+            <name>.<package extension>.
+
+        """
+        nam_filename = f"{name}.nam"
+        self.simulation.rename_model_namefile(self, nam_filename)
+        self.name_file.filename = nam_filename
+        self.model_nam_file = nam_filename
+        package_type_count = {}
+        for package in self.packagelist:
+            if package.package_type not in package_type_count:
+                base_filename, leaf = os.path.split(package.filename)
+                lleaf = leaf.split(".")
+                if len(lleaf) > 1:
+                    # keep existing extension
+                    ext = lleaf[-1]
+                else:
+                    # no extension found, create a new one
+                    ext = package.package_type
+                new_fileleaf = f"{name}.{ext}"
+                if base_filename != "":
+                    package.filename = os.path.join(
+                        base_filename, new_fileleaf
+                    )
+                else:
+                    package.filename = new_fileleaf
+                package_type_count[package.package_type] = 1
+            else:
+                package_type_count[package.package_type] += 1
+                package.filename = "{}_{}.{}".format(
+                    name,
+                    package_type_count[package.package_type],
+                    package.package_type,
+                )
+
+    def set_all_data_external(
+        self,
+        check_data=True,
+        external_data_folder=None,
+        base_name=None,
+        binary=False,
+    ):
+        """Sets the model's list and array data to be stored externally.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        check_data : bool
+            Determines if data error checking is enabled during this
+            process.
+        external_data_folder : str
+            Folder, relative to the simulation path or model relative path,
+            where external data will be stored
+        base_name : str
+            Base file name prefix for all files
+        binary : bool
+            Whether files will be stored as binary
+
+        """
+        for package in self.packagelist:
+            package.set_all_data_external(
+                check_data,
+                external_data_folder,
+                base_name,
+                binary,
+            )
+
+    def set_all_data_internal(self, check_data=True):
+        """Sets the model's list and array data to be stored internally.
+ + Parameters + ---------- + check_data : bool + Determines if data error checking is enabled during this + process. + + """ + for package in self.packagelist: + package.set_all_data_internal(check_data) + + def register_package( + self, + package, + add_to_package_list=True, + set_package_name=True, + set_package_filename=True, + ): + """ + Registers a package with the model. This method is used internally + by FloPy and is not intended for use by the end user. + + Parameters + ---------- + package : MFPackage + Package to register + add_to_package_list : bool + Add package to lookup list + set_package_name : bool + Produce a package name for this package + set_package_filename : bool + Produce a filename for this package + + Returns + ------- + path, package structure : tuple, MFPackageStructure + + """ + package.container_type = [PackageContainerType.model] + if package.parent_file is not None: + path = package.parent_file.path + (package.package_type,) + else: + path = (self.name, package.package_type) + package_struct = self.structure.get_package_struct( + package.package_type + ) + if add_to_package_list and path in self._package_paths: + if ( + package_struct is not None + and not package_struct.multi_package_support + and not isinstance(package.parent_file, MFPackage) + ): + # package of this type already exists, replace it + self.remove_package(package.package_type) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with type {} already exists. " + "Replacing existing package" + ".".format(package.package_type) + ) + elif ( + not set_package_name + and package.package_name + in self._package_container.package_name_dict + ): + # package of this type with this name already + # exists, replace it + self.remove_package( + self._package_container.package_name_dict[ + package.package_name + ] + ) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with name {} already exists. " + "Replacing existing package" + ".".format(package.package_name) + ) + + # make sure path is unique + if path in self._package_paths: + path_iter = datautil.PathIter(path) + for new_path in path_iter: + if new_path not in self._package_paths: + path = new_path + break + self._package_paths[path] = 1 + + if package.package_type.lower() == "nam": + if not package.internal_package: + excpt_str = ( + "Unable to register nam file. Do not create your own nam " + "files. Nam files are automatically created and managed " + "for you by FloPy." 
+ ) + print(excpt_str) + raise FlopyException(excpt_str) + + return path, self.structure.name_file_struct_obj + + package_extension = package.package_type + if set_package_name: + # produce a default package name + if ( + package_struct is not None + and package_struct.multi_package_support + ): + # check for other registered packages of this type + name_iter = datautil.NameIter(package.package_type, False) + for package_name in name_iter: + if ( + package_name + not in self._package_container.package_name_dict + ): + package.package_name = package_name + suffix = package_name.split("_") + if ( + len(suffix) > 1 + and datautil.DatumUtil.is_int(suffix[-1]) + and suffix[-1] != "0" + ): + # update file extension to make unique + package_extension = ( + f"{package_extension}_{suffix[-1]}" + ) + break + else: + package.package_name = package.package_type + + if set_package_filename: + # filename uses model base name + package._filename = f"{self.name}.{package.package_type}" + if ( + package._filename + in self._package_container.package_filename_dict + ): + # auto generate a unique file name and register it + file_name = MFFileMgmt.unique_file_name( + package._filename, + self._package_container.package_filename_dict, + ) + package._filename = file_name + + if add_to_package_list: + self._package_container.add_package(package) + + # add obs file to name file if it does not have a parent + if package.package_type in self.structure.package_struct_objs or ( + package.package_type == "obs" and package.parent_file is None + ): + # update model name file + pkg_type = package.package_type.upper() + if ( + package.package_type != "obs" and + self.structure.package_struct_objs[ + package.package_type + ].read_as_arrays + ): + pkg_type = pkg_type[0:-1] + # Model Assumption - assuming all name files have a package + # recarray + file_mgr = self.simulation_data.mfpath + model_rel_path = file_mgr.model_relative_path[self.name] + if model_rel_path != ".": + package_rel_path = os.path.join( + model_rel_path, package.filename + ) + else: + package_rel_path = package.filename + self.name_file.packages.update_record( + [ + f"{pkg_type}6", + package_rel_path, + package.package_name, + ], + 0, + ) + if package_struct is not None: + return (path, package_struct) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Unable to register unsupported file type {} " + "for model {}.".format(package.package_type, self.name) + ) + return None, None + + def load_package( + self, + ftype, + fname, + pname, + strict, + ref_path, + dict_package_name=None, + parent_package: Optional[MFPackage] = None, + ): + """ + Loads a package from a file. This method is used internally by FloPy + and is not intended for the end user. + + Parameters + ---------- + ftype : str + the file type + fname : str + the name of the file containing the package input + pname : str + the user-defined name for the package + strict : bool + strict mode when loading the file + ref_path : str + path to the file. 
uses local path if set to None + dict_package_name : str + package name for dictionary lookup + parent_package : MFPackage + parent package + + Examples + -------- + """ + if ref_path is not None: + fname = os.path.join(ref_path, fname) + sim_struct = mfstructure.MFStructure().sim_struct + if ( + ftype in self.structure.package_struct_objs + and self.structure.package_struct_objs[ftype].multi_package_support + ) or ( + ftype in sim_struct.utl_struct_objs + and sim_struct.utl_struct_objs[ftype].multi_package_support + ): + # resolve dictionary name for package + if dict_package_name is not None: + if parent_package is not None: + dict_package_name = f"{parent_package.path[-1]}_{ftype}" + else: + # use dict_package_name as the base name + if ftype in self._ftype_num_dict: + self._ftype_num_dict[dict_package_name] += 1 + else: + self._ftype_num_dict[dict_package_name] = 0 + dict_package_name = "{}_{}".format( + dict_package_name, + self._ftype_num_dict[dict_package_name], + ) + else: + # use ftype as the base name + if ftype in self._ftype_num_dict: + self._ftype_num_dict[ftype] += 1 + else: + self._ftype_num_dict[ftype] = 1 + if pname is not None: + dict_package_name = pname + else: + dict_package_name = ( + f"{ftype}-{self._ftype_num_dict[ftype]}" + ) + else: + dict_package_name = ftype + + # clean up model type text + model_type = self.structure.model_type + while datautil.DatumUtil.is_int(model_type[-1]): + model_type = model_type[0:-1] + + # create package + package_obj = PackageContainer.package_factory(ftype, model_type) + package = package_obj( + self, + filename=fname, + pname=dict_package_name, + loading_package=True, + parent_file=parent_package, + _internal_package=True, + ) + try: + package.load(strict) + except ReadAsArraysException: + # create ReadAsArrays package and load it instead + package_obj = PackageContainer.package_factory( + f"{ftype}a", model_type + ) + package = package_obj( + self, + filename=fname, + pname=dict_package_name, + loading_package=True, + parent_file=parent_package, + _internal_package=True, + ) + package.load(strict) + + # register child package with the model + self._package_container.add_package(package) + if parent_package is not None: + # register child package with the parent package + parent_package.add_package(package) + + return package + + def plot(self, SelPackList=None, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + model input data from a model instance + + Args: + model: Flopy model instance + SelPackList: (list) list of package names to plot, if none + all packages will be plotted + + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. + (default is zero) + key : str + MfList dictionary key. (default is None) + + Returns: + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. 
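+
+        Examples
+        --------
+        A minimal usage sketch; assumes ``gwf`` is a loaded model. The
+        package selection and layer number are illustrative only:
+
+        >>> axes = gwf.plot(SelPackList=["npf"], mflay=0)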
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        axes = PlotUtilities._plot_model_helper(
+            self, SelPackList=SelPackList, **kwargs
+        )
+
+        return axes
+
+    @staticmethod
+    def _resolve_idomain(idomain, botm):
+        if idomain is None:
+            if botm is None:
+                return idomain
+            else:
+                return np.ones_like(botm)
+        return idomain
diff --git a/flopy/mf6/tmp/mfpackage.py b/flopy/mf6/tmp/mfpackage.py
new file mode 100644
index 0000000000..8ab096f79a
--- /dev/null
+++ b/flopy/mf6/tmp/mfpackage.py
@@ -0,0 +1,3666 @@
+import copy
+import datetime
+import errno
+import inspect
+import os
+import sys
+import warnings
+
+import numpy as np
+
+from ..mbase import ModelInterface
+from ..pakbase import PackageInterface
+from ..utils import datautil
+from ..utils.check import mf6check
+from ..version import __version__
+from .coordinates import modeldimensions
+from .data import (
+    mfdata,
+    mfdataarray,
+    mfdatalist,
+    mfdataplist,
+    mfdatascalar,
+    mfstructure,
+)
+from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal
+from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure
+from .mfbase import (
+    ExtFileAction,
+    FlopyException,
+    MFDataException,
+    MFFileMgmt,
+    MFInvalidTransientBlockHeaderException,
+    PackageContainer,
+    PackageContainerType,
+    ReadAsArraysException,
+    VerbosityLevel,
+)
+from .utils.output_util import MF6Output
+
+
+class MFBlockHeader:
+    """
+    Represents the header of a block in an MF6 input file. This class is
+    used internally by FloPy and its direct use by a user of this library
+    is not recommended.
+
+    Parameters
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+
+    Attributes
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+    data_items : list
+        List of MFVariable objects for the variables contained in this block
+
+    """
+
+    def __init__(
+        self,
+        name,
+        variable_strings,
+        comment,
+        simulation_data=None,
+        path=None,
+        block=None,
+    ):
+        self.name = name
+        self.variable_strings = variable_strings
+        self.block = block
+        if not (
+            (simulation_data is None and path is None)
+            or (simulation_data is not None and path is not None)
+        ):
+            raise FlopyException(
+                "Block header must be initialized with both "
+                "simulation_data and path or with neither."
+ ) + if simulation_data is None: + self.comment = comment + self.simulation_data = None + self.path = path + self.comment_path = None + else: + self.connect_to_dict(simulation_data, path, comment) + # TODO: Get data_items from dictionary + self.data_items = [] + # build block comment paths + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + if isinstance(path, list): + path = tuple(path) + if path is not None: + self.blk_trailing_comment_path = path + ( + name, + "blk_trailing_comment", + ) + self.blk_post_comment_path = path + ( + name, + "blk_post_comment", + ) + if self.blk_trailing_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_trailing_comment_path] = ( + MFComment("", "", simulation_data, 0) + ) + if self.blk_post_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_post_comment_path] = MFComment( + "\n", "", simulation_data, 0 + ) + else: + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + + def __lt__(self, other): + transient_key = self.get_transient_key() + if transient_key is None: + return True + else: + other_key = other.get_transient_key() + if other_key is None: + return False + else: + return transient_key < other_key + + def build_header_variables( + self, + simulation_data, + block_header_structure, + block_path, + data, + dimensions, + ): + """Builds data objects to hold header variables.""" + self.data_items = [] + var_path = block_path + (block_header_structure[0].name,) + + # fix up data + fixed_data = [] + if ( + block_header_structure[0].data_item_structures[0].type + == DatumType.keyword + ): + data_item = block_header_structure[0].data_item_structures[0] + fixed_data.append(data_item.name) + if isinstance(data, tuple): + data = list(data) + if isinstance(data, list): + fixed_data = fixed_data + data + else: + fixed_data.append(data) + if len(fixed_data) > 0: + fixed_data = [tuple(fixed_data)] + # create data object + new_data = self.block.data_factory( + simulation_data, + None, + block_header_structure[0], + True, + var_path, + dimensions, + fixed_data, + ) + + self.add_data_item(new_data, data) + + def add_data_item(self, new_data, data): + """Adds data to the block.""" + self.data_items.append(new_data) + while isinstance(data, list): + if len(data) > 0: + data = data[0] + else: + data = None + if not isinstance(data, tuple): + data = (data,) + self.blk_trailing_comment_path += data + self.blk_post_comment_path += data + + def is_same_header(self, block_header): + """Checks if `block_header` is the same header as this header.""" + if len(self.variable_strings) > 0: + if len(self.variable_strings) != len( + block_header.variable_strings + ): + return False + else: + for sitem, oitem in zip( + self.variable_strings, block_header.variable_strings + ): + if sitem != oitem: + return False + return True + elif ( + len(self.data_items) > 0 and len(block_header.variable_strings) > 0 + ): + typ_obj = ( + self.data_items[0].structure.data_item_structures[0].type_obj + ) + if typ_obj == int or typ_obj == float: + return bool( + self.variable_strings[0] + == block_header.variable_strings[0] + ) + else: + return True + elif len(self.data_items) == len(block_header.variable_strings): + return True + return False + + def get_comment(self): + """Get block header comment""" + if self.simulation_data is None: + return self.comment + else: + return self.simulation_data.mfdata[self.comment_path] + + 
def connect_to_dict(self, simulation_data, path, comment=None):
+        """Connect this block header to the simulation data dictionary and
+        store its comment there."""
+        self.simulation_data = simulation_data
+        self.path = path
+        self.comment_path = path + ("blk_hdr_comment",)
+        if comment is None:
+            simulation_data.mfdata[self.comment_path] = self.comment
+        else:
+            simulation_data.mfdata[self.comment_path] = comment
+        self.comment = None
+
+    def write_header(self, fd):
+        """Writes block header to file object `fd`.
+
+        Parameters
+        ----------
+        fd : file object
+            File object to write block header to.
+
+        """
+        fd.write(f"BEGIN {self.name}")
+        if len(self.data_items) > 0:
+            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
+                one_based = (
+                    self.data_items[0].structure.type == DatumType.integer
+                )
+                entry = self.data_items[0].get_file_entry(
+                    values_only=True, one_based=one_based
+                )
+            else:
+                entry = self.data_items[0].get_file_entry()
+            fd.write(str(entry.rstrip()))
+            if len(self.data_items) > 1:
+                for data_item in self.data_items[1:]:
+                    entry = data_item.get_file_entry(values_only=True)
+                    fd.write(str(entry).rstrip())
+        if self.get_comment().text:
+            fd.write(" ")
+            self.get_comment().write(fd)
+        fd.write("\n")
+
+    def write_footer(self, fd):
+        """Writes block footer to file object `fd`.
+
+        Parameters
+        ----------
+        fd : file object
+            File object to write block footer to.
+
+        """
+        fd.write(f"END {self.name}")
+        if len(self.data_items) > 0:
+            one_based = self.data_items[0].structure.type == DatumType.integer
+            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
+                entry = self.data_items[0].get_file_entry(
+                    values_only=True, one_based=one_based
+                )
+            else:
+                entry = self.data_items[0].get_file_entry()
+            fd.write(str(entry.rstrip()))
+        fd.write("\n")
+
+    def get_transient_key(self, data_path=None):
+        """Get transient key associated with this block header."""
+        transient_key = None
+        for index in range(0, len(self.data_items)):
+            if self.data_items[index].structure.type != DatumType.keyword:
+                if data_path == self.data_items[index].path:
+                    # avoid infinite recursion
+                    return True
+                transient_key = self.data_items[index].get_data()
+                if isinstance(transient_key, np.recarray):
+                    item_struct = self.data_items[index].structure
+                    key_index = item_struct.first_non_keyword_index()
+                    if not (
+                        key_index is not None
+                        and len(transient_key[0]) > key_index
+                    ):
+                        if key_index is None:
+                            raise FlopyException(
+                                "Block header index could "
+                                "not be determined."
+                            )
+                        else:
+                            raise FlopyException(
+                                'Block header index "{}" '
+                                'must be less than "{}"'
+                                ".".format(key_index, len(transient_key[0]))
+                            )
+                    transient_key = transient_key[0][key_index]
+                break
+        return transient_key
+
+
+class MFBlock:
+    """
+    Represents a block in an MF6 input file. This class is used internally
+    by FloPy; its direct use by users of this library is not recommended.
+
+    Parameters
+    ----------
+    simulation_data : MFSimulationData
+        Data specific to this simulation
+    dimensions : MFDimensions
+        Describes model dimensions including model grid and simulation time
+    structure : MFVariableStructure
+        Structure describing block
+    path : tuple
+        Unique path to block
+
+    Attributes
+    ----------
+    block_headers : MFBlockHeader
+        Block header text (BEGIN/END), header variables, comments in the
+        header
+    structure : MFBlockStructure
+        Structure describing block
+    path : tuple
+        Unique path to block
+    datasets : dict
+        Dictionary of dataset objects with keys that are the name of the
+        dataset
+    datasets_keyword : dict
+        Dictionary of dataset objects with keys that are keywords identifying
+        the start of a dataset
+    enabled : bool
+        Whether the block is being used in the simulation
+
+    """
+
+    def __init__(
+        self,
+        simulation_data,
+        dimensions,
+        structure,
+        path,
+        model_or_sim,
+        container_package,
+    ):
+        self._simulation_data = simulation_data
+        self._dimensions = dimensions
+        self._model_or_sim = model_or_sim
+        self._container_package = container_package
+        self.block_headers = [
+            MFBlockHeader(
+                structure.name,
+                [],
+                MFComment("", path, simulation_data, 0),
+                simulation_data,
+                path,
+                self,
+            )
+        ]
+        self.structure = structure
+        self.path = path
+        self.datasets = {}
+        self.datasets_keyword = {}
+        # initially disabled when the block contains only optional data
+        self.enabled = structure.number_non_optional_data() > 0
+        self.loaded = False
+        self.external_file_name = None
+        self._structure_init()
+
+    def __repr__(self):
+        return self._get_data_str(True)
+
+    def __str__(self):
+        return self._get_data_str(False)
+
+    def _get_data_str(self, formal):
+        data_str = ""
+        for dataset in self.datasets.values():
+            if formal:
+                ds_repr = repr(dataset)
+                if len(ds_repr.strip()) > 0:
+                    data_str = (
+                        f"{data_str}{dataset.structure.name}\n{dataset!r}\n"
+                    )
+            else:
+                ds_str = str(dataset)
+                if len(ds_str.strip()) > 0:
+                    data_str = (
+                        f"{data_str}{dataset.structure.name}\n{dataset!s}\n"
+                    )
+        return data_str
+
+    # return an MFScalar, MFList, or MFArray
+    def data_factory(
+        self,
+        sim_data,
+        model_or_sim,
+        structure,
+        enable,
+        path,
+        dimensions,
+        data=None,
+        package=None,
+    ):
+        """Creates the appropriate data child object derived from MFData."""
+        data_type = structure.get_datatype()
+        # examine the data structure and determine the data type
+        if (
+            data_type == mfstructure.DataType.scalar_keyword
+            or data_type == mfstructure.DataType.scalar
+        ):
+            return mfdatascalar.MFScalar(
+                sim_data,
+                model_or_sim,
+                structure,
+                data,
+                enable,
+                path,
+                dimensions,
+            )
+        elif (
+            data_type == mfstructure.DataType.scalar_keyword_transient
+            or data_type == mfstructure.DataType.scalar_transient
+        ):
+            trans_scalar = mfdatascalar.MFScalarTransient(
+                sim_data, model_or_sim, structure, enable, path, dimensions
+            )
+            if data is not None:
+                trans_scalar.set_data(data, key=0)
+            return trans_scalar
+        elif data_type == mfstructure.DataType.array:
+            return mfdataarray.MFArray(
+                sim_data,
+                model_or_sim,
+                structure,
+                data,
+                enable,
+                path,
+                dimensions,
+                self,
+            )
+        elif data_type == mfstructure.DataType.array_transient:
+            trans_array = mfdataarray.MFTransientArray(
+                sim_data,
+                model_or_sim,
+                structure,
+                enable,
+                path,
+                dimensions,
+                self,
+            )
+            if data is not None:
+                trans_array.set_data(data, key=0)
+            return trans_array
+        elif data_type == mfstructure.DataType.list:
+            if (
+                structure.basic_item
+                and self._container_package.package_type.lower() != "nam"
+                and self._simulation_data.use_pandas
+            ):
+
return mfdataplist.MFPandasList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + else: + return mfdatalist.MFList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + elif data_type == mfstructure.DataType.list_transient: + if structure.basic_item and self._simulation_data.use_pandas: + trans_list = mfdataplist.MFPandasTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + else: + trans_list = mfdatalist.MFTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + trans_list.set_data(data, key=0, autofill=True) + return trans_list + elif data_type == mfstructure.DataType.list_multiple: + mult_list = mfdatalist.MFMultipleList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + mult_list.set_data(data, key=0, autofill=True) + return mult_list + + def _structure_init(self): + # load datasets keywords into dictionary + for dataset_struct in self.structure.data_structures.values(): + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + # load block header data items into dictionary + for dataset in self.structure.block_header_structure: + self._new_dataset(dataset.name, dataset, True, None) + + def set_model_relative_path(self, model_ws): + """Sets `model_ws` as the model path relative to the simulation's + path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. + """ + # update datasets + for key, dataset in self.datasets.items(): + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_name = os.path.split(file_line[0])[1] + file_line[0] = os.path.join(model_ws, old_file_name) + # update block headers + for block_header in self.block_headers: + for dataset in block_header.data_items: + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_path, old_file_name = os.path.split( + file_line[1] + ) + new_file_path = os.path.join( + model_ws, old_file_name + ) + # update transient keys of datasets within the + # block + for key, idataset in self.datasets.items(): + if isinstance(idataset, mfdata.MFTransient): + idataset.update_transient_key( + file_line[1], new_file_path + ) + file_line[1] = os.path.join( + model_ws, old_file_name + ) + + def add_dataset(self, dataset_struct, data, var_path): + """Add data to this block.""" + try: + self.datasets[var_path[-1]] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + var_path, + self._dimensions, + data, + 
self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + + self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] + dtype = dataset_struct.get_datatype() + if ( + dtype == mfstructure.DataType.list_transient + or dtype == mfstructure.DataType.list_multiple + or dtype == mfstructure.DataType.array_transient + ): + # build repeating block header(s) + if isinstance(data, dict): + # Add block headers for each dictionary key + for index in data: + if isinstance(index, tuple): + header_list = list(index) + else: + header_list = [index] + self._build_repeating_header(header_list) + elif isinstance(data, list): + # Add a single block header of value 0 + self._build_repeating_header([0]) + elif ( + dtype != mfstructure.DataType.list_multiple + and data is not None + ): + self._build_repeating_header([[0]]) + + return self.datasets[var_path[-1]] + + def _build_repeating_header(self, header_data): + if self.header_exists(header_data[0]): + return + if ( + len(self.block_headers[-1].data_items) == 1 + and self.block_headers[-1].data_items[0].get_data() is not None + ): + block_header_path = self.path + (len(self.block_headers) + 1,) + block_header = MFBlockHeader( + self.structure.name, + [], + MFComment("", self.path, self._simulation_data, 0), + self._simulation_data, + block_header_path, + self, + ) + self.block_headers.append(block_header) + else: + block_header_path = self.path + (len(self.block_headers),) + + struct = self.structure + last_header = self.block_headers[-1] + try: + last_header.build_header_variables( + self._simulation_data, + struct.block_header_structure, + block_header_path, + header_data, + self._dimensions, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while building" + " block header variables for block " + '"{}"'.format(last_header.name), + ) + + def _new_dataset( + self, key, dataset_struct, block_header=False, initial_val=None + ): + dataset_path = self.path + (key,) + if block_header: + if ( + dataset_struct.type == DatumType.integer + and initial_val is not None + and len(initial_val) >= 1 + and dataset_struct.get_record_size()[0] == 1 + ): + # stress periods are stored 0 based + initial_val = int(initial_val[0]) - 1 + if isinstance(initial_val, list): + initial_val_path = tuple(initial_val) + initial_val = [tuple(initial_val)] + else: + initial_val_path = initial_val + try: + new_data = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + self.block_headers[-1].add_data_item(new_data, initial_val_path) + + else: + try: + self.datasets[key] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + 
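Editor's note: the block-header branch of `_new_dataset` converts the PERIOD number read from a header to flopy's internal key with `int(initial_val[0]) - 1` — MF6 input files count stress periods from 1, while flopy's transient keys count from 0. A small sketch of the round trip:

```python
def key_from_header(period_field: str) -> int:
    # "BEGIN PERIOD 1" in an MF6 file -> transient key 0 in flopy
    return int(period_field) - 1


def header_from_key(key: int) -> str:
    return f"BEGIN PERIOD {key + 1}"


assert key_from_header("1") == 0
assert header_from_key(2) == "BEGIN PERIOD 3"
```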
) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + + def is_empty(self): + """Returns true if this block is empty.""" + for key, dataset in self.datasets.items(): + try: + has_data = dataset.has_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while verifying" + ' data of dataset "{}" in block ' + '"{}"'.format(dataset.structure.name, self.structure.name), + ) + + if has_data is not None and has_data: + return False + return True + + def load(self, block_header, fd, strict=True): + """Loads block from file object. file object must be advanced to + beginning of block before calling. + + Parameters + ---------- + block_header : MFBlockHeader + Block header for block block being loaded. + fd : file + File descriptor of file being loaded + strict : bool + Enforce strict MODFLOW 6 file format. + """ + # verify number of header variables + if ( + len(block_header.variable_strings) + < self.structure.number_non_optional_block_header_data() + ): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" does not ' + "contain the correct number of " + "variables {}".format(block_header.name, self.path) + ) + print(warning_str) + return + + if self.loaded: + # verify header has not already been loaded + for bh_current in self.block_headers: + if bh_current.is_same_header(block_header): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" is ' + "not a unique block header " + "{}".format(block_header.name, self.path) + ) + print(warning_str) + return + + # init + self.enabled = True + if not self.loaded: + self.block_headers = [] + block_header.block = self + self.block_headers.append(block_header) + + # process any header variable + if len(self.structure.block_header_structure) > 0: + dataset = self.structure.block_header_structure[0] + self._new_dataset( + dataset.name, + dataset, + True, + self.block_headers[-1].variable_strings, + ) + + # handle special readasarrays case + if ( + self._container_package.structure.read_as_arrays + or ( + hasattr(self._container_package, "aux") + and self._container_package.aux.structure.layered + ) + ): + # auxiliary variables may appear with aux variable name as keyword + aux_vars = self._container_package.auxiliary.get_data() + if aux_vars is not None: + for var_name in list(aux_vars[0])[1:]: + self.datasets_keyword[(var_name,)] = ( + self._container_package.aux.structure + ) + + comments = [] + + # capture any initial comments + initial_comment = MFComment("", "", 0) + fd_block = fd + line = fd_block.readline() + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(line) + post_data_comments = MFComment("", "", self._simulation_data, 0) + while MFComment.is_comment(line, True): + initial_comment.add_text(line) + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + + # if block not empty + external_file_info 
= None
+        if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"):
+            if arr_line[0].lower() == "open/close":
+                # open block contents from external file
+                fd_block.readline()
+                root_path = self._simulation_data.mfpath.get_sim_path()
+                try:
+                    file_name = os.path.split(arr_line[1])[-1]
+                    if (
+                        self._simulation_data.verbosity_level.value
+                        >= VerbosityLevel.verbose.value
+                    ):
+                        print(
+                            f'        opening external file "{file_name}"...'
+                        )
+                    external_file_info = arr_line
+                except Exception:
+                    type_, value_, traceback_ = sys.exc_info()
+                    message = f'Error reading external file specified in line "{line}"'
+                    raise MFDataException(
+                        self._container_package.model_name,
+                        self._container_package._get_pname(),
+                        self.path,
+                        "reading external file",
+                        self.structure.name,
+                        inspect.stack()[0][3],
+                        type_,
+                        value_,
+                        traceback_,
+                        message,
+                        self._simulation_data.debug,
+                    )
+            if len(self.structure.data_structures) <= 1:
+                # load a single data set
+                dataset = self.datasets[next(iter(self.datasets))]
+                try:
+                    if (
+                        self._simulation_data.verbosity_level.value
+                        >= VerbosityLevel.verbose.value
+                    ):
+                        print(
+                            f"        loading data {dataset.structure.name}..."
+                        )
+                    next_line = dataset.load(
+                        line,
+                        fd_block,
+                        self.block_headers[-1],
+                        initial_comment,
+                        external_file_info,
+                    )
+                except MFDataException as mfde:
+                    raise MFDataException(
+                        mfdata_except=mfde,
+                        model=self._container_package.model_name,
+                        package=self._container_package._get_pname(),
+                        message='Error occurred while loading data "{}" in '
+                        'block "{}" from file "{}"'
+                        ".".format(
+                            dataset.structure.name,
+                            self.structure.name,
+                            fd_block.name,
+                        ),
+                    )
+                package_info_list = self._get_package_info(dataset)
+                if package_info_list is not None:
+                    for package_info in package_info_list:
+                        if (
+                            self._simulation_data.verbosity_level.value
+                            >= VerbosityLevel.verbose.value
+                        ):
+                            print(
+                                f"        loading child package {package_info[0]}..."
+ ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + + if next_line[1] is not None: + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + else: + arr_line = "" + # capture any trailing comments + dataset.post_data_comments = post_data_comments + while arr_line and ( + len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" + ): + next_line[1] = fd_block.readline().strip() + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + if arr_line and ( + len(next_line[1]) <= 2 + or arr_line[0][:3].upper() != "END" + ): + post_data_comments.add_text(" ".join(arr_line)) + else: + # look for keyword and store line as data or comment + try: + key, results = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + except MFInvalidTransientBlockHeaderException as e: + warning_str = f"WARNING: {e}" + print(warning_str) + self.block_headers.pop() + return + + self._save_comments(arr_line, line, key, comments) + if results[1] is None or results[1][:3].upper() != "END": + # block consists of unordered datasets + # load the data sets out of order based on + # initial constants + line = " " + while line != "": + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line: + # determine if at end of block + if ( + len(arr_line[0]) > 2 + and arr_line[0][:3].upper() == "END" + ): + break + # look for keyword and store line as data o + # r comment + key, result = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + self._save_comments(arr_line, line, key, comments) + if ( + result[1] is not None + and result[1][:3].upper() == "END" + ): + break + else: + # block empty, store empty array in block variables + empty_arr = [] + for ds in self.datasets.values(): + if isinstance(ds, mfdata.MFTransient): + transient_key = block_header.get_transient_key() + ds.set_data(empty_arr, key=transient_key) + self.loaded = True + self.is_valid() + + def _find_data_by_keyword(self, line, fd, initial_comment): + first_key = None + nothing_found = False + next_line = [True, line] + while next_line[0] and not nothing_found: + arr_line = datautil.PyListUtil.split_data_line(next_line[1]) + key = datautil.find_keyword(arr_line, self.datasets_keyword) + if key is not None: + ds_name = self.datasets_keyword[key].name + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading data {ds_name}...") + next_line = self.datasets[ds_name].load( + next_line[1], + fd, + self.block_headers[-1], + initial_comment, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format(ds_name, self.structure.name, fd.name), + ) + + # see if first item's name indicates a reference to + # another package + package_info_list = self._get_package_info( + self.datasets[ds_name] + 
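Editor's note: `_find_data_by_keyword` leans on the `datasets_keyword` dict built in `_structure_init` — each dataset registers the keyword tuples that may start one of its input lines, and `datautil.find_keyword` matches the line against them. A simplified stand-in for that lookup (the real helper handles quoting and edge cases more carefully):

```python
def find_keyword(tokens, keyword_dict):
    # try the longest leading tuple first, then progressively shorter ones
    for n in range(len(tokens), 0, -1):
        key = tuple(t.lower() for t in tokens[:n])
        if key in keyword_dict:
            return key
    return None


datasets_keyword = {("k",): "npf.k", ("rewet_record",): "npf.rewet_record"}
assert find_keyword(["K", "1.0", "2.0"], datasets_keyword) == ("k",)
assert find_keyword(["UNKNOWN"], datasets_keyword) is None
```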
) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[1]}..." + ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + if first_key is None: + first_key = key + nothing_found = False + elif ( + arr_line[0].lower() == "readasarrays" + and self.path[-1].lower() == "options" + and self._container_package.structure.read_as_arrays is False + ): + error_msg = ( + "ERROR: Attempting to read a ReadAsArrays " + "package as a non-ReadAsArrays " + "package {}".format(self.path) + ) + raise ReadAsArraysException(error_msg) + else: + nothing_found = True + + if first_key is None: + # look for recarrays. if there is a lone recarray in this block, + # use it by default + recarrays = self.structure.get_all_recarrays() + if len(recarrays) != 1: + return key, [None, None] + dataset = self.datasets[recarrays[0].name] + ds_result = dataset.load( + line, fd, self.block_headers[-1], initial_comment + ) + + # see if first item's name indicates a reference to another + # package + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[0]}..." 
+ ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + None, + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package(pkg, pkg.filename, False) + + return recarrays[0].keyword, ds_result + else: + return first_key, next_line + + def _get_package_info(self, dataset): + if not dataset.structure.file_data: + return None + for index in range(0, len(dataset.structure.data_item_structures)): + data_item = dataset.structure.data_item_structures[index] + if ( + data_item.type == DatumType.keyword + or data_item.type == DatumType.string + ): + item_name = data_item.name + package_type = item_name[:-1] + model_type = self._model_or_sim.structure.model_type + # not all packages have the same naming convention + # try different naming conventions to find the appropriate + # package + package_types = [ + package_type, + f"{self._container_package.package_type}" + f"{package_type}", + ] + package_type_found = None + for ptype in package_types: + if ( + PackageContainer.package_factory(ptype, model_type) + is not None + ): + package_type_found = ptype + break + if package_type_found is not None: + try: + data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'getting data from "{}" ' + 'in block "{}".'.format( + dataset.structure.name, self.structure.name + ), + ) + package_info_list = [] + if isinstance(data, np.recarray): + for row in data: + self._add_to_info_list( + package_info_list, + row[index], + package_type_found, + ) + else: + self._add_to_info_list( + package_info_list, data, package_type_found + ) + + return package_info_list + return None + + def _add_to_info_list( + self, package_info_list, file_location, package_type_found + ): + file_path, file_name = os.path.split(file_location) + dict_package_name = f"{package_type_found}_{self.path[-2]}" + package_info_list.append( + ( + package_type_found, + file_name, + file_path, + dict_package_name, + ) + ) + + def _save_comments(self, arr_line, line, key, comments): + # FIX: Save these comments somewhere in the data set + if key not in self.datasets_keyword: + if MFComment.is_comment(key, True): + if comments: + comments.append("\n") + comments.append(arr_line) + + def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes block to a file object. + + Parameters + ---------- + fd : file object + File object to write to. 
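Editor's note: `_get_package_info` derives a child package type from a file-record item name by dropping the trailing character (a hypothetical `ts6` item implies a `ts` subpackage) and then probes `PackageContainer.package_factory` under two naming conventions. A sketch of that resolution against an invented registry:

```python
def resolve_child_package_type(item_name, parent_type, registry):
    package_type = item_name[:-1]  # e.g. "ts6" -> "ts"
    for candidate in (package_type, f"{parent_type}{package_type}"):
        if candidate in registry:  # stands in for the package_factory lookup
            return candidate
    return None


registry = {"ts", "obs"}  # invented registry contents
assert resolve_child_package_type("ts6", "ghb", registry) == "ts"
assert resolve_child_package_type("xyz6", "ghb", registry) is None
```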
+ + """ + # never write an empty block + is_empty = self.is_empty() + if ( + is_empty + and self.structure.name.lower() != "exchanges" + and self.structure.name.lower() != "options" + and self.structure.name.lower() != "sources" + and self.structure.name.lower() != "stressperioddata" + ): + return + if self.structure.repeating(): + repeating_datasets = self._find_repeating_datasets() + for repeating_dataset in repeating_datasets: + # resolve any missing block headers + self._add_missing_block_headers(repeating_dataset) + for block_header in sorted(self.block_headers): + # write block + self._write_block(fd, block_header, ext_file_action) + else: + self._write_block(fd, self.block_headers[0], ext_file_action) + + def _add_missing_block_headers(self, repeating_dataset): + key_data_list = repeating_dataset.get_active_key_list() + # assemble a dictionary of data keys and empty keys + key_dict = {} + for key in key_data_list: + key_dict[key[0]] = True + for key, value in repeating_dataset.empty_keys.items(): + if value: + key_dict[key] = True + for key in key_dict.keys(): + has_data = repeating_dataset.has_data(key) + empty_key = ( + key in repeating_dataset.empty_keys + and repeating_dataset.empty_keys[key] + ) + if not self.header_exists(key) and (has_data or empty_key): + self._build_repeating_header([key]) + + def header_exists(self, key, data_path=None): + if not isinstance(key, list): + if key is None: + return + comp_key_list = [key] + else: + comp_key_list = key + for block_header in self.block_headers: + transient_key = block_header.get_transient_key(data_path) + if transient_key is True: + return + for comp_key in comp_key_list: + if transient_key is not None and transient_key == comp_key: + return True + return False + + def set_all_data_external( + self, + base_name, + check_data=True, + external_data_folder=None, + binary=False, + ): + """Sets the block's list and array data to be stored externally, + base_name is external file name's prefix, check_data determines + if data error checking is enabled during this process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + base_name : str + Base file name of external files where data will be written to. + check_data : bool + Whether to do data error checking. 
+ external_data_folder + Folder where external data will be stored + binary: bool + Whether file will be stored as binary + + """ + + for key, dataset in self.datasets.items(): + lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( + dataset, mfdataplist.MFPandasList + ) + if ( + isinstance(dataset, mfdataarray.MFArray) + or (lst_data and dataset.structure.type == DatumType.recarray) + and dataset.enabled + ): + if not binary or ( + lst_data + and ( + dataset.data_dimensions.package_dim.boundnames() + or not dataset.structure.basic_item + ) + ): + ext = "txt" + binary = False + else: + ext = "bin" + file_path = f"{base_name}_{dataset.structure.name}.{ext}" + replace_existing_external = False + if external_data_folder is not None: + # get simulation root path + root_path = self._simulation_data.mfpath.get_sim_path() + # get model relative path, if it exists + if isinstance(self._model_or_sim, ModelInterface): + name = self._model_or_sim.name + rel_path = ( + self._simulation_data.mfpath.model_relative_path[ + name + ] + ) + if rel_path is not None: + root_path = os.path.join(root_path, rel_path) + full_path = os.path.join(root_path, external_data_folder) + if not os.path.exists(full_path): + # create new external data folder + os.makedirs(full_path) + file_path = os.path.join(external_data_folder, file_path) + replace_existing_external = True + dataset.store_as_external_file( + file_path, + replace_existing_external=replace_existing_external, + check_data=check_data, + binary=binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the block's list and array data to be stored internally, + check_data determines if data error checking is enabled during this + process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + check_data : bool + Whether to do data error checking. 
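Editor's note: the package-level `set_all_data_external` (defined later in this file) forwards to this block method for every block. A hedged usage sketch, assuming an existing simulation `sim` and model `gwf` that has a RIV package:

```python
# write this package's list/array data to external files under sim_ws
riv = gwf.get_package("riv")
riv.set_all_data_external(
    check_data=True,
    external_data_folder="external",  # folder name is our choice
    base_name="riv",
    binary=False,
)
sim.write_simulation()  # data is now referenced via OPEN/CLOSE records
```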
+ + """ + + for key, dataset in self.datasets.items(): + if ( + isinstance(dataset, mfdataarray.MFArray) + or ( + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) + and dataset.structure.type == DatumType.recarray + ) + and dataset.enabled + ): + dataset.store_internal(check_data=check_data) + + def _find_repeating_datasets(self): + repeating_datasets = [] + for key, dataset in self.datasets.items(): + if dataset.repeating: + repeating_datasets.append(dataset) + return repeating_datasets + + def _prepare_external(self, fd, file_name, binary=False): + fd_main = fd + fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) + # resolve full file and folder path + fd_file_path = os.path.join(fd_path, file_name) + fd_folder_path = os.path.split(fd_file_path)[0] + if fd_folder_path != "": + if not os.path.exists(fd_folder_path): + # create new external data folder + os.makedirs(fd_folder_path) + return fd_main, fd_file_path + + def _write_block(self, fd, block_header, ext_file_action): + transient_key = None + basic_list = False + dataset_one = list(self.datasets.values())[0] + if isinstance( + dataset_one, + (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), + ): + basic_list = True + for dataset in self.datasets.values(): + assert isinstance( + dataset, + ( + mfdataplist.MFPandasList, + mfdataplist.MFPandasTransientList, + ), + ) + # write block header + block_header.write_header(fd) + if len(block_header.data_items) > 0: + transient_key = block_header.get_transient_key() + + # gather data sets to write + data_set_output = [] + data_found = False + for key, dataset in self.datasets.items(): + try: + if transient_key is None: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" writing data {dataset.structure.name}..." + ) + if basic_list: + ext_fname = dataset.external_file_name() + if ext_fname is not None: + binary = dataset.binary_ext_data() + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry(fd, fd_main=fd_main) + fd = fd_main + else: + dataset.write_file_entry(fd) + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + else: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + " writing data {} ({}).." 
".".format( + dataset.structure.name, transient_key + ) + ) + if basic_list: + ext_fname = dataset.external_file_name(transient_key) + if ext_fname is not None: + binary = dataset.binary_ext_data(transient_key) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + fd = fd_main + else: + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + ) + else: + if dataset.repeating: + output = dataset.get_file_entry( + transient_key, ext_file_action=ext_file_action + ) + if output is not None: + data_set_output.append(output) + data_found = True + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message=( + "Error occurred while writing data " + f'"{dataset.structure.name}" in block ' + f'"{self.structure.name}" to file "{fd.name}"' + ), + ) + if not data_found: + return + if not basic_list: + # write block header + block_header.write_header(fd) + + if self.external_file_name is not None: + indent_string = self._simulation_data.indent_string + fd.write( + f"{indent_string}open/close " + f'"{self.external_file_name}"\n' + ) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, self.external_file_name + ) + # write data sets + for output in data_set_output: + fd.write(output) + + # write trailing comments + pth = block_header.blk_trailing_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + if self.external_file_name is not None and not basic_list: + # switch back writing to package file + fd.close() + fd = fd_main + + # write block footer + block_header.write_footer(fd) + + # write post block comments + pth = block_header.blk_post_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + # write extra line if comments are off + if not self._simulation_data.comments_on: + fd.write("\n") + + def is_allowed(self): + """Determine if block is valid based on the values of dependent + MODFLOW variables.""" + if self.structure.variable_dependant_path: + # fill in empty part of the path with the current path + if len(self.structure.variable_dependant_path) == 3: + dependant_var_path = ( + self.path[0], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 2: + dependant_var_path = ( + self.path[0], + self.path[1], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 1: + dependant_var_path = ( + self.path[0], + self.path[1], + self.path[2], + ) + self.structure.variable_dependant_path + else: + dependant_var_path = None + + # get dependency + dependant_var = None + mf_data = self._simulation_data.mfdata + if dependant_var_path in mf_data: + dependant_var = mf_data[dependant_var_path] + + # resolve dependency + if self.structure.variable_value_when_active[0] == "Exists": + exists = self.structure.variable_value_when_active[1] + if dependant_var and exists.lower() == "true": + return True + elif not dependant_var and exists.lower() == "false": + return True + else: + return False + elif not dependant_var: + return False + elif 
self.structure.variable_value_when_active[0] == ">": + min_val = self.structure.variable_value_when_active[1] + if dependant_var > float(min_val): + return True + else: + return False + elif self.structure.variable_value_when_active[0] == "<": + max_val = self.structure.variable_value_when_active[1] + if dependant_var < float(max_val): + return True + else: + return False + return True + + def is_valid(self): + """ + Returns true if the block is valid. + """ + # check data sets + for dataset in self.datasets.values(): + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid: + return False + # check variables + for block_header in self.block_headers: + for dataset in block_header.data_items: + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid(): + return False + + +class MFPackage(PackageInterface): + """ + Provides an interface for the user to specify data to build a package. + + Parameters + ---------- + parent : MFModel, MFSimulation, or MFPackage + The parent model, simulation, or package containing this package + package_type : str + String defining the package type + filename : str or PathLike + Name or path of file where this package is stored + quoted_filename : str + Filename with quotes around it when there is a space in the name + pname : str + Package name + loading_package : bool + Whether or not to add this package to the parent container's package + list during initialization + + Attributes + ---------- + blocks : dict + Dictionary of blocks contained in this package by block name + path : tuple + Data dictionary path to this package + structure : PackageStructure + Describes the blocks and data contain in this package + dimensions : PackageDimension + Resolves data dimensions for data within this package + + """ + + def __init__( + self, + parent, + package_type, + filename=None, + pname=None, + loading_package=False, + **kwargs, + ): + parent_file = kwargs.pop("parent_file", None) + if isinstance(parent, MFPackage): + self.model_or_sim = parent.model_or_sim + self.parent_file = parent + elif parent_file is not None: + self.model_or_sim = parent + self.parent_file = parent_file + else: + self.model_or_sim = parent + self.parent_file = None + _internal_package = kwargs.pop("_internal_package", False) + if _internal_package: + self.internal_package = True + else: + self.internal_package = False + self._data_list = [] + self._package_type = package_type + if self.model_or_sim.type == "Model" and package_type.lower() != "nam": + self.model_name = self.model_or_sim.name + else: + self.model_name = None + + # a package must have a dfn_file_name + if not hasattr(self, "dfn_file_name"): + self.dfn_file_name = "" + + if ( + self.model_or_sim.type != "Model" + and self.model_or_sim.type != "Simulation" + ): + message = ( + "Invalid model_or_sim parameter. Expecting either a " + 'model or a simulation. 
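Editor's note: `is_allowed` reduces to evaluating an `(operator, operand)` pair from the DFN against the value of another MODFLOW variable — `"Exists"`, `">"`, or `"<"`. A condensed sketch of that predicate (flopy first resolves the dependent variable through `simulation_data.mfdata`):

```python
def block_allowed(condition, value):
    op, operand = condition
    if op == "Exists":
        return bool(value) == (operand.lower() == "true")
    if value is None:
        return False
    if op == ">":
        return value > float(operand)
    if op == "<":
        return value < float(operand)
    return True


assert block_allowed(("Exists", "true"), 5)
assert not block_allowed((">", "0"), -1)
```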
Instead type "{}" was ' + "given.".format(type(self.model_or_sim)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self._package_container = PackageContainer( + self.model_or_sim.simulation_data + ) + self.simulation_data = self.model_or_sim.simulation_data + + self.blocks = {} + self.container_type = [] + self.loading_package = loading_package + if pname is not None: + if not isinstance(pname, str): + message = ( + "Invalid pname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(pname)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self.package_name = pname.lower() + else: + self.package_name = None + + if filename is None: + if self.model_or_sim.type == "Simulation": + # filename uses simulation base name + base_name = os.path.basename( + os.path.normpath(self.model_or_sim.name) + ) + self._filename = f"{base_name}.{package_type}" + else: + # filename uses model base name + self._filename = f"{self.model_or_sim.name}.{package_type}" + else: + if not isinstance(filename, (str, os.PathLike)): + message = ( + "Invalid fname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(filename)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + self._filename = datautil.clean_filename( + str(filename).replace("\\", "/") + ) + self.path, self.structure = self.model_or_sim.register_package( + self, not loading_package, pname is None, filename is None + ) + self.dimensions = self.create_package_dimensions() + + if self.path is None: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package type {} failed to register property." + " {}".format(self._package_type, self.path) + ) + if self.parent_file is not None: + self.container_type.append(PackageContainerType.package) + # init variables that may be used later + self.post_block_comments = None + self.last_error = None + self.bc_color = "black" + self.__inattr = False + self._child_package_groups = {} + child_builder_call = kwargs.pop("child_builder_call", None) + if ( + self.parent_file is not None + and child_builder_call is None + and package_type in self.parent_file._child_package_groups + ): + # initialize as part of the parent's child package group + chld_pkg_grp = self.parent_file._child_package_groups[package_type] + chld_pkg_grp.init_package(self, self._filename, False) + + # remove any remaining valid kwargs + key_list = list(kwargs.keys()) + for key in key_list: + if "filerecord" in key and hasattr(self, f"{key}"): + kwargs.pop(f"{key}") + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = ( + f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
+ ) + raise FlopyException(excpt_str) + + def __init_subclass__(cls): + """Register package type""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.model_name, + package=self._get_pname(), + ) + return + + if all( + hasattr(self, attr) for attr in ["model_or_sim", "_package_type"] + ): + if hasattr(self.model_or_sim, "_mg_resync"): + if not self.model_or_sim._mg_resync: + self.model_or_sim._mg_resync = self._mg_resync + + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + @property + def filename(self): + """Package's file name.""" + return self._filename + + @property + def quoted_filename(self): + """Package's file name with quotes if there is a space.""" + if " " in self._filename: + return f'"{self._filename}"' + return self._filename + + @filename.setter + def filename(self, fname): + """Package's file name.""" + if ( + isinstance(self.parent_file, MFPackage) + and self.package_type in self.parent_file._child_package_groups + ): + fname = datautil.clean_filename(fname) + try: + child_pkg_group = self.parent_file._child_package_groups[ + self.structure.file_type + ] + child_pkg_group._update_filename(self._filename, fname) + except Exception: + print( + "WARNING: Unable to update file name for parent" + f"package of {self.package_name}." + ) + if self.model_or_sim is not None and fname is not None: + if self._package_type != "nam": + self.model_or_sim.update_package_filename(self, fname) + self._filename = fname + + @property + def package_type(self): + """String describing type of package""" + return self._package_type + + @property + def name(self): + """Name of package""" + return [self.package_name] + + @name.setter + def name(self, name): + """Name of package""" + self.package_name = name + + @property + def parent(self): + """Parent package""" + return self.model_or_sim + + @parent.setter + def parent(self, parent): + """Parent package""" + assert False, "Do not use this setter to set the parent" + + @property + def plottable(self): + """If package is plottable""" + if self.model_or_sim.type == "Simulation": + return False + else: + return True + + @property + def output(self): + """ + Method to get output associated with a specific package + + Returns + ------- + MF6Output object + """ + return MF6Output(self) + + @property + def data_list(self): + """List of data in this package.""" + # return [data_object, data_object, ...] + return self._data_list + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. 
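Editor's note: all of the `package_*` lookup properties now emit `DeprecationWarning`; `get_package` (shown below) is the supported entry point. A short migration sketch, assuming `pkg` is an `MFPackage` with attached OBS children:

```python
# old, now emits DeprecationWarning on access:
#   obs_children = pkg.package_type_dict.get("obs")
# preferred:
obs_children = pkg.get_package("obs", type_only=True)
```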
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + def netcdf_attrs(self, mesh=None): + attrs = {} + + def attr_d(tagname, iaux=None, layer=None): + tag = tagname + name = f"{self.package_name}" + if iaux: + auxvar = self.dimensions.get_aux_variables()[0] + tag = f"{tag}/{iaux}" + name = f"{name}_{auxvar[iaux]}" + else: + name = f"{name}_{tagname}" + if layer: + tag = f"{tag}/layer{layer}" + name = f"{name}_l{layer}" + + a = {} + a["varname"] = name + a["attrs"] = {} + a["attrs"]["modflow_input"] = ( + f"{self.model_name}" + f"/{self.package_name}" + f"/{tagname}" + ).upper() + if iaux: + a["attrs"]["modflow_iaux"] = iaux + if layer: + a["attrs"]["layer"] = layer + return tag, a + + for key, block in self.blocks.items(): + if key != "griddata" and key != "period": + continue + for dataset in block.datasets.values(): + if isinstance(dataset, mfdataarray.MFArray): + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if not (dataset.structure.netcdf and dataset.has_data()): + continue + if dataset.structure.layered and mesh == "LAYERED": + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate(self.dimensions.get_aux_variables()[0]): + if auxname == 'auxiliary' and n == 0: + continue + for l in range(self.model_or_sim.modelgrid.nlay): + key, a = attr_d(data_item.name, n, l+1) + attrs[key] = a + else: + for l in range(self.model_or_sim.modelgrid.nlay): + key, a = attr_d(data_item.name, layer=l+1) + attrs[key] = a + else: + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate(self.dimensions.get_aux_variables()[0]): + if auxname == 'auxiliary' and n == 0: + continue + key, a = attr_d(data_item.name, iaux=n) + attrs[key] = a + else: + key, a = attr_d(data_item.name) + attrs[key] = a + return attrs + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. 
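Editor's note: to make the `netcdf_attrs` contract above concrete, the mapping it returns for a layered array under `mesh="LAYERED"` has this shape (illustrative values for a hypothetical model `gwf1` with an NPF package and `nlay=2`):

```python
expected = {
    "k/layer1": {
        "varname": "npf_k_l1",
        "attrs": {"modflow_input": "GWF1/NPF/K", "layer": 1},
    },
    "k/layer2": {
        "varname": "npf_k_l2",
        "attrs": {"modflow_input": "GWF1/NPF/K", "layer": 2},
    },
}
```

Downstream dataset builders can key NetCDF variables off `varname` while `modflow_input` preserves the MODFLOW addressing; auxiliary variables additionally carry `modflow_iaux`.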
+ type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def add_package(self, package): + pkg_type = package.package_type.lower() + if pkg_type in self._package_container.package_type_dict: + for existing_pkg in self._package_container.package_type_dict[ + pkg_type + ]: + if existing_pkg is package: + # do not add the same package twice + return + self._package_container.add_package(package) + + def _get_aux_data(self, aux_names): + if hasattr(self, "stress_period_data"): + spd = self.stress_period_data.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "packagedata"): + pd = self.packagedata.get_data() + if aux_names[0][1] in pd.dtype.names: + return pd + if hasattr(self, "perioddata"): + spd = self.perioddata.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "aux"): + return self.aux.get_data() + return None + + def _boundnames_active(self): + if hasattr(self, "boundnames"): + if self.boundnames.get_data(): + return True + return False + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Data check, returns True on success. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + """ + + if checktype is None: + checktype = mf6check + # do general checks + chk = super().check(f, verbose, level, checktype) + + # do mf6 specific checks + if hasattr(self, "auxiliary"): + # auxiliary variable check + # check if auxiliary variables are defined + aux_names = self.auxiliary.get_data() + if aux_names is not None and len(aux_names[0]) > 1: + num_aux_names = len(aux_names[0]) - 1 + # check for stress period data + aux_data = self._get_aux_data(aux_names) + if aux_data is not None and len(aux_data) > 0: + # make sure the check object exists + if chk is None: + chk = self._get_check(f, verbose, level, checktype) + if isinstance(aux_data, dict): + aux_datasets = list(aux_data.values()) + else: + aux_datasets = [aux_data] + dataset_type = "unknown" + for dataset in aux_datasets: + if isinstance(dataset, np.recarray): + dataset_type = "recarray" + break + elif isinstance(dataset, np.ndarray): + dataset_type = "ndarray" + break + # if aux data is in a list + if dataset_type == "recarray": + # check for time series data + time_series_name_dict = {} + if hasattr(self, "ts") and hasattr( + self.ts, "time_series_namerecord" + ): + # build dictionary of time series data variables + ts_nr = self.ts.time_series_namerecord.get_data() + if ts_nr is not None: + for item in ts_nr: + if len(item) > 0 and item[0] is not None: + time_series_name_dict[item[0]] = True + # auxiliary variables are last unless boundnames + # defined, then second to last + if self._boundnames_active(): + offset = 1 + else: + offset = 0 + + # loop through stress period datasets with aux data + for data in aux_datasets: + if isinstance(data, np.recarray): + for row in data: + row_size = len(row) + aux_start_loc = ( + row_size - num_aux_names - offset - 1 + ) + # loop through auxiliary variables + for idx, var in enumerate( + list(aux_names[0])[1:] + ): + # get index of current aux variable + data_index = aux_start_loc 
+ idx + # verify auxiliary value is either + # numeric or time series variable + if ( + not datautil.DatumUtil.is_float( + row[data_index] + ) + and row[data_index] + not in time_series_name_dict + ): + desc = ( + f"Invalid non-numeric " + f"value " + f"'{row[data_index]}' " + f"in auxiliary data." + ) + chk._add_to_summary( + "Error", + desc=desc, + package=self.package_name, + ) + # else if stress period data is arrays + elif dataset_type == "ndarray": + # loop through auxiliary stress period datasets + for data in aux_datasets: + # verify auxiliary value is either numeric or time + # array series variable + if isinstance(data, np.ndarray): + val = np.isnan(np.sum(data)) + if val: + desc = ( + "One or more nan values were " + "found in auxiliary data." + ) + chk._add_to_summary( + "Warning", + desc=desc, + package=self.package_name, + ) + return chk + + def _get_nan_exclusion_list(self): + excl_list = [] + if hasattr(self, "stress_period_data"): + spd_struct = self.stress_period_data.structure + for item_struct in spd_struct.data_item_structures: + if item_struct.optional or item_struct.keystring_dict: + excl_list.append(item_struct.name) + return excl_list + + def _get_data_str(self, formal, show_data=True): + data_str = ( + "package_name = {}\nfilename = {}\npackage_type = {}" + "\nmodel_or_simulation_package = {}" + "\n{}_name = {}" + "\n".format( + self._get_pname(), + self._filename, + self.package_type, + self.model_or_sim.type.lower(), + self.model_or_sim.type.lower(), + self.model_or_sim.name, + ) + ) + if self.parent_file is not None and formal: + data_str = ( + f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" + ) + else: + data_str = f"{data_str}\n" + if show_data: + for block in self.blocks.values(): + if formal: + bl_repr = repr(block) + if len(bl_repr.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, repr(block) + ) + ) + else: + bl_str = str(block) + if len(bl_str.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, str(block) + ) + ) + return data_str + + def _get_pname(self): + if self.package_name is not None: + return str(self.package_name) + else: + return str(self._filename) + + def _get_block_header_info(self, line, path): + # init + header_variable_strs = [] + arr_clean_line = line.strip().split() + header_comment = MFComment( + "", path + (arr_clean_line[1],), self.simulation_data, 0 + ) + # break header into components + if len(arr_clean_line) < 2: + message = ( + "Block header does not contain a name. 
Name " + 'expected in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "parsing block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + elif len(arr_clean_line) == 2: + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + else: + # process text after block name + comment = False + for entry in arr_clean_line[2:]: + # if start of comment + if MFComment.is_comment(entry.strip()[0]): + comment = True + if comment: + header_comment.text = " ".join( + [header_comment.text, entry] + ) + else: + header_variable_strs.append(entry) + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + + def _update_size_defs(self): + # build temporary data lookup by name + data_lookup = {} + for block in self.blocks.values(): + for dataset in block.datasets.values(): + data_lookup[dataset.structure.name] = dataset + + # loop through all data + for block in self.blocks.values(): + for dataset in block.datasets.values(): + # if data shape is 1-D + if ( + dataset.structure.shape + and len(dataset.structure.shape) == 1 + ): + # if shape name is data in this package + if dataset.structure.shape[0] in data_lookup: + size_def = data_lookup[dataset.structure.shape[0]] + size_def_name = size_def.structure.name + + if isinstance(dataset, mfdata.MFTransient): + # for transient data always use the maximum size + new_size = -1 + for key in dataset.get_active_key_list(): + try: + data = dataset.get_data(key=key[0]) + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + data_len = len(data) + if data_len > new_size: + new_size = data_len + else: + # for all other data set max to size + new_size = -1 + try: + data = dataset.get_data() + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + new_size = len(dataset.get_data()) + + if size_def.get_data() is None: + current_size = -1 + else: + current_size = size_def.get_data() + + if new_size > current_size: + # store current size + size_def.set_data(new_size) + + # informational message to the user + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "INFORMATION: {} in {} changed to {} " + "based on size of {}".format( + size_def_name, + size_def.structure.path[:-1], + new_size, + dataset.structure.name, + ) + ) + + def inspect_cells(self, cell_list, stress_period=None): + """ + Inspect model cells. Returns package data associated with cells. + + Parameters + ---------- + cell_list : list of tuples + List of model cells. Each model cell is a tuple of integers. + ex: [(1,1,1), (2,4,3)] + stress_period : int + For transient data, only return data from this stress period. If + not specified or None, all stress period data will be returned. 
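Editor's note: a hedged usage sketch for `inspect_cells`, assuming `pkg` is a package on an existing structured-grid model; `DataSearchOutput` exposes the path, header, and matching entries used by the implementation that follows:

```python
# find everything this package defines at two cells, limited to period 0
results = pkg.inspect_cells([(0, 0, 0), (1, 0, 0)], stress_period=0)
for found in results:
    print(found.path, found.data_header, len(found.data_entries))
```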
+ + Returns + ------- + output : array + Array containing inspection results + + """ + data_found = [] + + # loop through blocks + local_index_names = [] + local_index_blocks = [] + local_index_values = [] + local_index_cellids = [] + # loop through blocks in package + for block in self.blocks.values(): + # loop through data in block + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + # handle list data + cellid_column = None + local_index_name = None + # loop through list data column definitions + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if index == 0 and data_item.type == DatumType.integer: + local_index_name = data_item.name + # look for cellid column in list data row + if isinstance(data_item, MFDataItemStructure) and ( + data_item.is_cellid or data_item.possible_cellid + ): + cellid_column = index + break + if cellid_column is not None: + data_output = DataSearchOutput(dataset.path) + local_index_vals = [] + local_index_cells = [] + # get data + if isinstance(dataset, mfdatalist.MFTransientList): + # data may be in multiple transient blocks, get + # data from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get data + main_data = {-1: dataset.get_data()} + + # loop through each dataset + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = value.dtype.names + # loop through list data rows + for line in value: + # loop through list of cells we are searching + # for + for cell in cell_list: + if isinstance( + line[cellid_column], tuple + ) and cellids_equal( + line[cellid_column], cell + ): + # save data found + data_output.data_entries.append(line) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append( + key + ) + if datautil.DatumUtil.is_int(line[0]): + # save index data for further + # processing. 
assuming index is + # always first entry + local_index_vals.append(line[0]) + local_index_cells.append(cell) + + if ( + local_index_name is not None + and len(local_index_vals) > 0 + ): + # capture index lookups for scanning related data + local_index_names.append(local_index_name) + local_index_blocks.append(block.path[-1]) + local_index_values.append(local_index_vals) + local_index_cellids.append(local_index_cells) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + elif isinstance(dataset, mfdataarray.MFArray): + # handle array data + data_shape = copy.deepcopy( + dataset.structure.data_item_structures[0].shape + ) + if dataset.path[-1] == "top": + # top is a special case where the two datasets + # need to be combined to get the correct layer top + model_grid = self.model_or_sim.modelgrid + main_data = {-1: model_grid.top_botm} + data_shape.append("nlay") + else: + if isinstance(dataset, mfdataarray.MFTransientArray): + # data may be in multiple blocks, get data from + # appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get a process data + main_data = {-1: dataset.get_data()} + if main_data is None: + continue + data_output = DataSearchOutput(dataset.path) + # loop through datasets + for key, array_data in main_data.items(): + if array_data is None: + continue + self.model_or_sim.match_array_cells( + cell_list, data_shape, array_data, key, data_output + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + + if len(local_index_names) > 0: + # look for data that shares the index value with data found + # for example a shared well or reach number + for block in self.blocks.values(): + # loop through data + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + data_item = dataset.structure.data_item_structures[0] + data_output = DataSearchOutput(dataset.path) + # loop through previous data found + for ( + local_index_name, + local_index_vals, + cell_ids, + local_block_name, + ) in zip( + local_index_names, + local_index_values, + local_index_cellids, + local_index_blocks, + ): + if local_block_name == block.path[-1]: + continue + if ( + isinstance(data_item, MFDataItemStructure) + and data_item.name == local_index_name + and data_item.type == DatumType.integer + ): + # matching data index type found, get data + if isinstance( + dataset, mfdatalist.MFTransientList + ): + # data may be in multiple blocks, get data + # from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block + main_data = {-1: dataset.get_data()} + # loop through the data + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = ( + value.dtype.names + ) + # loop through each row of data + for line in value: + # loop through the index values we are + # looking for + for index_val, cell_id in zip( + local_index_vals, cell_ids + ): + # try to match index values we are + # looking for to the data + if index_val == line[0]: + # save data found + data_output.data_entries.append( + line + ) + data_output.data_entry_ids.append( + index_val + ) + data_output.data_entry_cellids.append( + cell_id + ) + data_output.data_entry_stress_period.append( + key + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + return 
data_found + + def remove(self): + """Removes this package from the simulation/model it is currently a + part of. + """ + self.model_or_sim.remove_package(self) + + def build_child_packages_container(self, pkg_type, filerecord): + """Builds a container object for any child packages. This method is + only intended for FloPy internal use.""" + # get package class + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + # create child package object + child_pkgs_name = f"utl{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is None and self.model_or_sim.model_type is None: + # simulation level object, try just the package type in the name + child_pkgs_name = f"{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is None: + # see if the package is part of one of the supported model types + for model_type in MFStructure().sim_struct.model_types: + child_pkgs_name = f"{model_type}{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is not None: + break + child_pkgs = child_pkgs_obj( + self.model_or_sim, self, pkg_type, filerecord, None, package_obj + ) + setattr(self, pkg_type, child_pkgs) + self._child_package_groups[pkg_type] = child_pkgs + + def _get_dfn_name_dict(self): + dfn_name_dict = {} + item_num = 0 + for item in self.structure.dfn_list: + if len(item) > 1: + item_name = item[1].split() + if len(item_name) > 1 and item_name[0] == "name": + dfn_name_dict[item_name[1]] = item_num + item_num += 1 + return dfn_name_dict + + def build_child_package(self, pkg_type, data, parameter_name, filerecord): + """Builds a child package. 
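Editor's note: in user code the child-package machinery surfaces as a generated attribute plus `initialize()`; for example, attaching an OBS child to an existing `ModflowGwfghb` instance `ghb` follows this pattern (file names illustrative):

```python
ghb.obs.initialize(
    filename="model.ghb.obs",
    digits=10,
    print_input=True,
    continuous={"model.ghb.obs.csv": [("h_obs", "GHB", (0, 0, 0))]},
)
```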
This method is only intended for FloPy + internal use.""" + if not hasattr(self, pkg_type): + self.build_child_packages_container(pkg_type, filerecord) + if data is not None: + package_group = getattr(self, pkg_type) + # build child package file name + child_path = package_group.next_default_file_path() + # create new empty child package + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + package = package_obj( + self, filename=child_path, child_builder_call=True + ) + assert hasattr(package, parameter_name) + + if isinstance(data, dict): + # order data correctly + dfn_name_dict = package._get_dfn_name_dict() + ordered_data_items = [] + for key, value in data.items(): + if key in dfn_name_dict: + ordered_data_items.append( + [dfn_name_dict[key], key, value] + ) + else: + ordered_data_items.append([999999, key, value]) + ordered_data_items = sorted( + ordered_data_items, key=lambda x: x[0] + ) + + # evaluate and add data to package + unused_data = {} + for order, key, value in ordered_data_items: + # if key is an attribute of the child package + if isinstance(key, str) and hasattr(package, key): + # set child package attribute + child_data_attr = getattr(package, key) + if isinstance(child_data_attr, mfdatalist.MFList): + child_data_attr.set_data(value, autofill=True) + elif isinstance(child_data_attr, mfdata.MFData): + child_data_attr.set_data(value) + elif key == "fname" or key == "filename": + child_path = value + package._filename = value + else: + setattr(package, key, value) + else: + unused_data[key] = value + if unused_data: + setattr(package, parameter_name, unused_data) + else: + setattr(package, parameter_name, data) + + # append package to list + package_group.init_package(package, child_path) + return package + + def build_mfdata(self, var_name, data=None): + """Returns the appropriate data type object (mfdatalist, mfdataarray, + or mfdatascalar) given that object the appropriate structure (looked + up based on var_name) and any data supplied. This method is for + internal FloPy library use only. + + Parameters + ---------- + var_name : str + Variable name + + data : many supported types + Data contained in this object + + Returns + ------- + data object : MFData subclass + + """ + if self.loading_package: + data = None + for key, block in self.structure.blocks.items(): + if var_name in block.data_structures: + if block.name not in self.blocks: + self.blocks[block.name] = MFBlock( + self.simulation_data, + self.dimensions, + block, + self.path + (key,), + self.model_or_sim, + self, + ) + dataset_struct = block.data_structures[var_name] + var_path = self.path + (key, var_name) + ds = self.blocks[block.name].add_dataset( + dataset_struct, data, var_path + ) + self._data_list.append(ds) + return ds + + message = 'Unable to find variable "{}" in package ' '"{}".'.format( + var_name, self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "building data objects", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + def set_model_relative_path(self, model_ws): + """Sets the model path relative to the simulation's path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. 
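+
+        Examples
+        --------
+        A minimal sketch; ``sim`` is an assumed, already-built
+        MFSimulation and the model and package names are hypothetical:
+
+        >>> gwf = sim.get_model("gwf_1")
+        >>> dis = gwf.get_package("dis")
+        >>> dis.set_model_relative_path("gwf_1")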
+ + """ + # update blocks + for key, block in self.blocks.items(): + block.set_model_relative_path(model_ws) + # update sub-packages + for package in self._package_container.packagelist: + package.set_model_relative_path(model_ws) + + def set_all_data_external( + self, + check_data=True, + external_data_folder=None, + base_name=None, + binary=False, + ): + """Sets the package's list and array data to be stored externally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + external_data_folder + Folder where external data will be stored + base_name: str + Base file name prefix for all files + binary: bool + Whether file will be stored as binary + """ + # set blocks + for key, block in self.blocks.items(): + file_name = os.path.split(self.filename)[1] + if base_name is not None: + file_name = f"{base_name}_{file_name}" + block.set_all_data_external( + file_name, + check_data, + external_data_folder, + binary, + ) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_external( + check_data, + external_data_folder, + base_name, + binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the package's list and array data to be stored internally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + + """ + # set blocks + for key, block in self.blocks.items(): + block.set_all_data_internal(check_data) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_internal(check_data) + + def load(self, strict=True): + """Loads the package from file. + + Parameters + ---------- + strict : bool + Enforce strict checking of data. + + Returns + ------- + success : bool + + """ + # open file + try: + fd_input_file = open( + datautil.clean_filename(self.get_file_path()), "r" + ) + except OSError as e: + if e.errno == errno.ENOENT: + message = "File {} of type {} could not be opened.".format( + self.get_file_path(), self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self.package_name, + self.path, + "loading package file", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + try: + self._load_blocks(fd_input_file, strict) + except ReadAsArraysException as err: + fd_input_file.close() + raise ReadAsArraysException(err) + # close file + fd_input_file.close() + + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # return validity of file + return self.is_valid() + + def is_valid(self): + """Returns whether or not this package is valid. 
+ + Returns + ------- + is valid : bool + + """ + # Check blocks + for block in self.blocks.values(): + # Non-optional blocks must be enabled + if ( + block.structure.number_non_optional_data() > 0 + and not block.enabled + and block.is_allowed() + ): + self.last_error = ( + f'Required block "{block.block_header.name}" not enabled' + ) + return False + # Enabled blocks must be valid + if block.enabled and not block.is_valid: + self.last_error = f'Invalid block "{block.block_header.name}"' + return False + + return True + + def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): + # init + self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( + MFComment("", self.path, self.simulation_data) + ) + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + blocks_read = 0 + found_first_block = False + line = " " + while line != "": + line = fd_input_file.readline() + clean_line = line.strip() + # If comment or empty line + if MFComment.is_comment(clean_line, True): + self._store_comment(line, found_first_block) + elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": + # parse block header + try: + block_header_info = self._get_block_header_info( + line, self.path + ) + except MFDataException as mfde: + message = ( + "An error occurred while loading block header " + 'in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "loading block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + mfde, + ) + + # if there is more than one possible block with the same name, + # resolve the correct block to use + block_key = block_header_info.name.lower() + block_num = 1 + possible_key = f"{block_header_info.name.lower()}-{block_num}" + if possible_key in self.blocks: + block_key = possible_key + block_header_name = block_header_info.name.lower() + while ( + block_key in self.blocks + and not self.blocks[block_key].is_allowed() + ): + block_key = f"{block_header_name}-{block_num}" + block_num += 1 + + if block_key not in self.blocks: + # block name not recognized, load block as comments and + # issue a warning + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" is not a valid block ' + "name for file type " + "{}.".format(block_key, self.package_type) + ) + print(warning_str) + self._store_comment(line, found_first_block) + while line != "": + line = fd_input_file.readline() + self._store_comment(line, found_first_block) + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line and ( + len(arr_line[0]) <= 2 + or arr_line[0][:3].upper() == "END" + ): + break + else: + found_first_block = True + skip_block = False + cur_block = self.blocks[block_key] + if cur_block.loaded: + # Only blocks defined as repeating are allowed to have + # multiple entries + header_name = block_header_info.name + if not self.structure.blocks[ + header_name.lower() + ].repeating(): + # warn and skip block + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" has ' + "multiple entries and is not " + "intended to be a repeating " + "block ({} package" + ")".format(header_name, self.package_type) + ) + print(warning_str) + skip_block = True + bhs = cur_block.structure.block_header_structure + bhval = block_header_info.variable_strings + if 
( + len(bhs) > 0 + and len(bhval) > 0 + and bhs[0].name == "iper" + ): + nper = self.simulation_data.mfdata[ + ("tdis", "dimensions", "nper") + ].get_data() + bhval_int = datautil.DatumUtil.is_int(bhval[0]) + if not bhval_int or int(bhval[0]) > nper: + # skip block when block stress period is greater + # than nper + skip_block = True + + if not skip_block: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading block {cur_block.structure.name}..." + ) + # reset comments + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + cur_block.load( + block_header_info, fd_input_file, strict + ) + + # write post block comment + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + blocks_read += 1 + if blocks_read >= max_blocks: + break + else: + # treat skipped block as if it is all comments + arr_line = datautil.PyListUtil.split_data_line( + clean_line + ) + self.post_block_comments.add_text(str(line), True) + while arr_line and ( + len(line) <= 2 or arr_line[0][:3].upper() != "END" + ): + line = fd_input_file.readline() + arr_line = datautil.PyListUtil.split_data_line( + line.strip() + ) + if arr_line: + self.post_block_comments.add_text( + str(line), True + ) + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + else: + if not ( + len(clean_line) == 0 + or (len(line) > 2 and line[:3].upper() == "END") + ): + # Record file location of beginning of unresolved text + # treat unresolved text as a comment for now + self._store_comment(line, found_first_block) + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes the package to a file. + + Parameters + ---------- + ext_file_action : ExtFileAction + How to handle pathing of external data files. + """ + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # create any folders in path + package_file_path = self.get_file_path() + package_folder = os.path.split(package_file_path)[0] + if package_folder and not os.path.isdir(package_folder): + os.makedirs(os.path.split(package_file_path)[0]) + + # open file + fd = open(package_file_path, "w") + + # write flopy header + if self.simulation_data.write_headers: + dt = datetime.datetime.now() + header = ( + "# File generated by Flopy version {} on {} at {}." + "\n".format( + __version__, + dt.strftime("%m/%d/%Y"), + dt.strftime("%H:%M:%S"), + ) + ) + fd.write(header) + + # write blocks + self._write_blocks(fd, ext_file_action) + + fd.close() + + def create_package_dimensions(self): + """Creates a package dimensions object. For internal FloPy library + use. + + Returns + ------- + package dimensions : PackageDimensions + + """ + model_dims = None + if self.container_type[0] == PackageContainerType.model: + model_dims = [ + modeldimensions.ModelDimensions( + self.path[0], self.simulation_data + ) + ] + else: + # this is a simulation file that does not correspond to a specific + # model. 
figure out which model to use and return a dimensions + # object for that model + if self.dfn_file_name[0:3] == "exg": + exchange_rec_array = self.simulation_data.mfdata[ + ("nam", "exchanges", "exchanges") + ].get_data() + if exchange_rec_array is None: + return None + for exchange in exchange_rec_array: + if exchange[1].lower() == self._filename.lower(): + model_dims = [ + modeldimensions.ModelDimensions( + exchange[2], self.simulation_data + ), + modeldimensions.ModelDimensions( + exchange[3], self.simulation_data + ), + ] + break + elif ( + self.dfn_file_name[4:7] == "gnc" + and self.model_or_sim.type == "Simulation" + ): + # get exchange file name associated with gnc package + if self.parent_file is not None: + exg_file_name = self.parent_file.filename + else: + raise Exception( + "Can not create a simulation-level " + "gnc file without a corresponding " + "exchange file. Exchange file must be " + "created first." + ) + # get models associated with exchange file from sim nam file + try: + exchange_recarray_data = ( + self.model_or_sim.name_file.exchanges.get_data() + ) + except MFDataException as mfde: + message = ( + "An error occurred while retrieving exchange " + "data from the simulation name file. The error " + "occurred while processing gnc file " + f'"{self.filename}".' + ) + raise MFDataException( + mfdata_except=mfde, + package=self._get_pname(), + message=message, + ) + assert exchange_recarray_data is not None + model_1 = None + model_2 = None + for exchange in exchange_recarray_data: + if exchange[1] == exg_file_name: + model_1 = exchange[2] + model_2 = exchange[3] + + # assign models to gnc package + model_dims = [ + modeldimensions.ModelDimensions( + model_1, self.simulation_data + ), + modeldimensions.ModelDimensions( + model_2, self.simulation_data + ), + ] + elif self.parent_file is not None: + model_dims = [] + for md in self.parent_file.dimensions.model_dim: + model_name = md.model_name + model_dims.append( + modeldimensions.ModelDimensions( + model_name, self.simulation_data + ) + ) + else: + model_dims = [ + modeldimensions.ModelDimensions(None, self.simulation_data) + ] + return modeldimensions.PackageDimensions( + model_dims, self.structure, self.path + ) + + def _store_comment(self, line, found_first_block): + # Store comment + if found_first_block: + self.post_block_comments.text += line + else: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].text += line + + def _write_blocks(self, fd, ext_file_action): + # verify that all blocks are valid + if not self.is_valid(): + message = ( + 'Unable to write out model file "{}" due to the ' + "following error: " + "{} ({})".format(self._filename, self.last_error, self.path) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "writing package blocks", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + # write initial comments + pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) + if pkg_hdr_comments_path in self.simulation_data.mfdata: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].write(fd, False) + + # loop through blocks + block_num = 1 + for block in self.blocks.values(): + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" writing block {block.structure.name}...") + # write block + block.write(fd, ext_file_action=ext_file_action) + block_num += 1 + + def get_file_path(self): + 
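+        # a package that belongs to a model resolves its file path against
+        # the model's path; otherwise the simulation path is used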
"""Returns the package file's path. + + Returns + ------- + file path : str + """ + if self.path[0] in self.simulation_data.mfpath.model_relative_path: + return os.path.join( + self.simulation_data.mfpath.get_model_path(self.path[0]), + self._filename, + ) + else: + return os.path.join( + self.simulation_data.mfpath.get_sim_path(), self._filename + ) + + def export(self, f, **kwargs): + """ + Method to export a package to netcdf or shapefile based on the + extension of the file name (.shp for shapefile, .nc for netcdf) + + Parameters + ---------- + f : str + Filename + kwargs : keyword arguments + modelgrid : flopy.discretization.Grid instance + User supplied modelgrid which can be used for exporting + in lieu of the modelgrid associated with the model object + + Returns + ------- + None or Netcdf object + + """ + from .. import export + + return export.utils.package_export(f, self, **kwargs) + + def plot(self, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate + file names for output image files. Plots will be exported as + image files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only + used if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is + zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. + + """ + from ..plot.plotutil import PlotUtilities + + if not self.plottable: + raise TypeError("Simulation level packages are not plottable") + + axes = PlotUtilities._plot_package_helper(self, **kwargs) + return axes + + +class MFChildPackages: + """ + Behind the scenes code for creating an interface to access child packages + from a parent package. This class is automatically constructed by the + FloPy library and is for internal library use only. 
+ + Parameters + ---------- + """ + + def __init__( + self, + model_or_sim, + parent, + pkg_type, + filerecord, + package=None, + package_class=None, + ): + self._packages = [] + self._filerecord = filerecord + if package is not None: + self._packages.append(package) + self._model_or_sim = model_or_sim + self._cpparent = parent + self._pkg_type = pkg_type + self._package_class = package_class + + def __init_subclass__(cls): + """Register package""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __getattr__(self, attr): + if ( + "_packages" in self.__dict__ + and len(self._packages) > 0 + and hasattr(self._packages[0], attr) + ): + item = getattr(self._packages[0], attr) + return item + raise AttributeError(attr) + + def __getitem__(self, k): + if isinstance(k, int): + if k < len(self._packages): + return self._packages[k] + raise ValueError(f"Package index {k} does not exist.") + + def __setattr__(self, key, value): + if ( + key != "_packages" + and key != "_model_or_sim" + and key != "_cpparent" + and key != "_inattr" + and key != "_filerecord" + and key != "_package_class" + and key != "_pkg_type" + ): + if len(self._packages) == 0: + raise Exception( + "No {} package is currently attached to package" + " {}. Use the initialize method to create a(n) " + "{} package before attempting to access its " + "properties.".format( + self._pkg_type, self._cpparent.filename, self._pkg_type + ) + ) + package = self._packages[0] + setattr(package, key, value) + return + super().__setattr__(key, value) + + def __default_file_path_base(self, file_path, suffix=""): + stem = os.path.split(file_path)[1] + stem_lst = stem.split(".") + file_name = ".".join(stem_lst[:-1]) + if len(stem_lst) > 1: + file_ext = stem_lst[-1] + return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" + elif suffix != "": + return f"{stem}.{self._pkg_type}" + else: + return f"{stem}.{suffix}.{self._pkg_type}" + + def __file_path_taken(self, possible_path): + for package in self._packages: + # Do case insensitive compare + if package.filename.lower() == possible_path.lower(): + return True + return False + + def next_default_file_path(self): + possible_path = self.__default_file_path_base(self._cpparent.filename) + suffix = 0 + while self.__file_path_taken(possible_path): + possible_path = self.__default_file_path_base( + self._cpparent.filename, suffix + ) + suffix += 1 + return possible_path + + def init_package(self, package, fname, remove_packages=True): + if remove_packages: + # clear out existing packages + self._remove_packages() + elif fname is not None: + self._remove_packages(fname) + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + # check file record variable + found = False + fr_data = self._filerecord.get_data() + if fr_data is not None: + for line in fr_data: + if line[0] == fname: + found = True + if not found: + # append file record variable + self._filerecord.append_data([(fname,)]) + # add the package to the list + self._packages.append(package) + + def _update_filename(self, old_fname, new_fname): + file_record = self._filerecord.get_data() + new_file_record_data = [] + if file_record is not None: + file_record_data = file_record[0] + for item in file_record_data: + base, fname = os.path.split(item) + if fname.lower() == old_fname.lower(): + if base: + new_file_record_data.append( + (os.path.join(base, new_fname),) + ) + else: + new_file_record_data.append((new_fname,)) + else: + 
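+                    # keep entries that do not reference the old file name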
new_file_record_data.append((item,)) + else: + new_file_record_data.append((new_fname,)) + self._filerecord.set_data(new_file_record_data) + + def _append_package(self, package, fname, update_frecord=True): + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + + if update_frecord: + # set file record variable + file_record = self._filerecord.get_data() + file_record_data = file_record + new_file_record_data = [] + for item in file_record_data: + new_file_record_data.append((item[0],)) + new_file_record_data.append((fname,)) + self._filerecord.set_data(new_file_record_data) + + for existing_pkg in self._packages: + if existing_pkg is package: + # do not add the same package twice + return + # add the package to the list + self._packages.append(package) + + def _remove_packages(self, fname=None, only_pop_from_list=False): + rp_list = [] + for idx, package in enumerate(self._packages): + if fname is None or package.filename == fname: + if not only_pop_from_list: + self._model_or_sim.remove_package(package) + rp_list.append(idx) + for idx in reversed(rp_list): + self._packages.pop(idx) diff --git a/flopy/mf6/tmp/mfstructure.py b/flopy/mf6/tmp/mfstructure.py new file mode 100644 index 0000000000..92cd0be3b6 --- /dev/null +++ b/flopy/mf6/tmp/mfstructure.py @@ -0,0 +1,2113 @@ +""" +mfstructure module. Contains classes related to package structure + + +""" + +import ast +import keyword +import os +from enum import Enum +from textwrap import TextWrapper + +import numpy as np + +from ..mfbase import StructException + +numeric_index_text = ( + "This argument is an index variable, which means that " + "it should be treated as zero-based when working with " + "FloPy and Python. Flopy will automatically subtract " + "one when loading index variables and add one when " + "writing index variables." +) + + +class DfnType(Enum): + common = 1 + sim_name_file = 2 + sim_tdis_file = 3 + ims_file = 4 + exch_file = 5 + model_name_file = 6 + model_file = 7 + gnc_file = 8 + mvr_file = 9 + utl = 10 + mvt_file = 11 + unknown = 999 + + +class Dfn: + """ + Base class for package file definitions + + Attributes + ---------- + dfndir : path + folder containing package definition files (dfn) + common : path + file containing common information + + Methods + ------- + get_file_list : () : list + returns all of the dfn files found in dfndir. 
files are returned in + a specified order defined in the local variable file_order + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self): + # directories + self.dfndir = os.path.join(".", "dfn") + self.common = os.path.join(self.dfndir, "common.dfn") + + def get_file_list(self): + file_order = [ + "sim-nam", # dfn completed tex updated + "sim-tdis", # dfn completed tex updated + "exg-gwfgwf", # dfn completed tex updated + "sln-ims", # dfn completed tex updated + "gwf-nam", # dfn completed tex updated + "gwf-dis", # dfn completed tex updated + "gwf-disv", # dfn completed tex updated + "gwf-disu", # dfn completed tex updated + "gwf-ic", # dfn completed tex updated + "gwf-npf", # dfn completed tex updated + "gwf-sto", # dfn completed tex updated + "gwf-hfb", # dfn completed tex updated + "gwf-chd", # dfn completed tex updated + "gwf-wel", # dfn completed tex updated + "gwf-drn", # dfn completed tex updated + "gwf-riv", # dfn completed tex updated + "gwf-ghb", # dfn completed tex updated + "gwf-rch", # dfn completed tex updated + "gwf-rcha", # dfn completed tex updated + "gwf-evt", # dfn completed tex updated + "gwf-evta", # dfn completed tex updated + "gwf-maw", # dfn completed tex updated + "gwf-sfr", # dfn completed tex updated + "gwf-lak", # dfn completed tex updated + "gwf-uzf", # dfn completed tex updated + "gwf-mvr", # dfn completed tex updated + "gwf-gnc", # dfn completed tex updated + "gwf-oc", # dfn completed tex updated + "utl-obs", + "utl-ts", + "utl-tab", + "utl-tas", + ] + + dfn_path, tail = os.path.split(os.path.realpath(__file__)) + dfn_path = os.path.join(dfn_path, "dfn") + # construct list of dfn files to process in the order of file_order + files = os.listdir(dfn_path) + for f in files: + if "common" in f or "flopy" in f: + continue + package_abbr = os.path.splitext(f)[0] + if package_abbr not in file_order: + file_order.append(package_abbr) + return [f"{fname}.dfn" for fname in file_order if f"{fname}.dfn" in files] + + def _file_type(self, file_name): + # determine file type + if len(file_name) >= 6 and file_name[0:6] == "common": + return DfnType.common, None + elif file_name[0:3] == "sim": + if file_name[3:6] == "nam": + return DfnType.sim_name_file, None + elif file_name[3:7] == "tdis": + return DfnType.sim_tdis_file, None + else: + return DfnType.unknown, None + elif file_name[0:3] == "nam": + return DfnType.sim_name_file, None + elif file_name[0:4] == "tdis": + return DfnType.sim_tdis_file, None + elif file_name[0:3] == "sln" or file_name[0:3] == "ims": + return DfnType.ims_file, None + elif file_name[0:3] == "exg": + return DfnType.exch_file, file_name[3:6] + elif file_name[0:3] == "utl": + return DfnType.utl, None + else: + model_type = file_name[0:3] + if file_name[3:6] == "nam": + return DfnType.model_name_file, model_type + elif file_name[3:6] == "gnc": + return DfnType.gnc_file, model_type + elif file_name[3:6] == "mvr": + return DfnType.mvr_file, model_type + elif file_name[3:6] == "mvt": + return DfnType.mvt_file, model_type + else: + return DfnType.model_file, model_type + + +class DfnPackage(Dfn): + """ + Dfn child class that loads dfn information from a list structure stored + in the auto-built package classes + + Attributes + ---------- + package : MFPackage + MFPackage subclass that contains dfn information + + Methods + ------- + get_block_structure_dict : (path : tuple, common : bool, model_file : + bool) : dict + returns a dictionary of block structure information for the package + + See Also + -------- + + 
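+    Dfn : base class for package file definitions
+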
Notes + ----- + + Examples + -------- + """ + + def __init__(self, package): + super().__init__() + self.package = package + self.package_type = package._package_type + self.dfn_file_name = package.dfn_file_name + # the package type is always the text after the last - + package_name = self.package_type.split("-") + self.package_type = package_name[-1] + if not isinstance(package_name, str) and len(package_name) > 1: + self.package_prefix = "".join(package_name[:-1]) + else: + self.package_prefix = "" + self.dfn_type, self.model_type = self._file_type( + self.dfn_file_name.replace("-", "") + ) + self.dfn_list = package.dfn + + def get_block_structure_dict(self, path, common, model_file, block_parent): + block_dict = {} + dataset_items_in_block = {} + self.dataset_items_needed_dict = {} + keystring_items_needed_dict = {} + current_block = None + + # get header dict + header_dict = {} + for item in self.dfn_list[0]: + if isinstance(item, str): + if item == "multi-package": + header_dict["multi-package"] = True + if item.startswith("package-type"): + header_dict["package-type"] = item.split(" ")[1] + for dfn_entry in self.dfn_list[1:]: + # load next data item + new_data_item_struct = MFDataItemStructure() + for next_line in dfn_entry: + new_data_item_struct.set_value(next_line, common) + # if block does not exist + if ( + current_block is None + or current_block.name != new_data_item_struct.block_name + ): + # create block + current_block = MFBlockStructure( + new_data_item_struct.block_name, + path, + model_file, + block_parent, + ) + # put block in block_dict + block_dict[current_block.name] = current_block + # init dataset item lookup + self.dataset_items_needed_dict = {} + dataset_items_in_block = {} + + # resolve block type + if len(current_block.block_header_structure) > 0: + if ( + len(current_block.block_header_structure[0].data_item_structures) + > 0 + and current_block.block_header_structure[0] + .data_item_structures[0] + .type + == DatumType.integer + ): + block_type = BlockType.transient + else: + block_type = BlockType.multiple + else: + block_type = BlockType.single + + if new_data_item_struct.block_variable: + block_dataset_struct = MFDataStructure( + new_data_item_struct, + model_file, + self.package_type, + self.dfn_list, + ) + block_dataset_struct.parent_block = current_block + self._process_needed_data_items( + block_dataset_struct, dataset_items_in_block + ) + block_dataset_struct.set_path(path + (new_data_item_struct.block_name,)) + block_dataset_struct.add_item(new_data_item_struct) + current_block.add_dataset(block_dataset_struct) + else: + new_data_item_struct.block_type = block_type + dataset_items_in_block[new_data_item_struct.name] = new_data_item_struct + + # if data item belongs to existing dataset(s) + item_location_found = False + if new_data_item_struct.name in self.dataset_items_needed_dict: + if new_data_item_struct.type == DatumType.record: + # record within a record - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, + current_block, + dataset_items_in_block, + path, + model_file, + False, + ) + new_data_item_struct.record_within_record = True + + for dataset in self.dataset_items_needed_dict[ + new_data_item_struct.name + ]: + item_added = dataset.add_item(new_data_item_struct, record=True) + item_location_found = item_location_found or item_added + # if data item belongs to an existing keystring + if new_data_item_struct.name in keystring_items_needed_dict: + new_data_item_struct.set_path( 
+ keystring_items_needed_dict[new_data_item_struct.name].path + ) + if new_data_item_struct.type == DatumType.record: + # record within a keystring - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, + current_block, + dataset_items_in_block, + path, + model_file, + False, + ) + keystring_items_needed_dict[ + new_data_item_struct.name + ].keystring_dict[new_data_item_struct.name] = new_data_item_struct + item_location_found = True + + if new_data_item_struct.type == DatumType.keystring: + # add keystrings to search list + for ( + key, + val, + ) in new_data_item_struct.keystring_dict.items(): + keystring_items_needed_dict[key] = new_data_item_struct + + # if data set does not exist + if not item_location_found: + self._new_dataset( + new_data_item_struct, + current_block, + dataset_items_in_block, + path, + model_file, + True, + ) + if ( + current_block.name.upper() == "SOLUTIONGROUP" + and len(current_block.block_header_structure) == 0 + ): + # solution_group a special case for now + block_data_item_struct = MFDataItemStructure() + block_data_item_struct.name = "order_num" + block_data_item_struct.data_items = ["order_num"] + block_data_item_struct.type = DatumType.integer + block_data_item_struct.longname = "order_num" + block_data_item_struct.description = ( + "internal variable to keep track of solution group number" + ) + block_dataset_struct = MFDataStructure( + block_data_item_struct, + model_file, + self.package_type, + self.dfn_list, + ) + block_dataset_struct.parent_block = current_block + block_dataset_struct.set_path( + path + (new_data_item_struct.block_name,) + ) + block_dataset_struct.add_item(block_data_item_struct) + current_block.add_dataset(block_dataset_struct) + return block_dict, header_dict + + def _new_dataset( + self, + new_data_item_struct, + current_block, + dataset_items_in_block, + path, + model_file, + add_to_block=True, + ): + current_dataset_struct = MFDataStructure( + new_data_item_struct, model_file, self.package_type, self.dfn_list + ) + current_dataset_struct.set_path(path + (new_data_item_struct.block_name,)) + self._process_needed_data_items(current_dataset_struct, dataset_items_in_block) + if add_to_block: + # add dataset + current_block.add_dataset(current_dataset_struct) + current_dataset_struct.parent_block = current_block + current_dataset_struct.add_item(new_data_item_struct) + return current_dataset_struct + + def _process_needed_data_items( + self, current_dataset_struct, dataset_items_in_block + ): + # add data items needed to dictionary + for ( + item_name, + val, + ) in current_dataset_struct.expected_data_items.items(): + if item_name in dataset_items_in_block: + current_dataset_struct.add_item(dataset_items_in_block[item_name]) + else: + if item_name in self.dataset_items_needed_dict: + self.dataset_items_needed_dict[item_name].append( + current_dataset_struct + ) + else: + self.dataset_items_needed_dict[item_name] = [current_dataset_struct] + + +class DataType(Enum): + """ + Types of data that can be found in a package file + """ + + scalar_keyword = 1 + scalar = 2 + array = 3 + array_transient = 4 + list = 5 + list_transient = 6 + list_multiple = 7 + scalar_transient = 8 + scalar_keyword_transient = 9 + + +class DatumType(Enum): + """ + Types of individual pieces of data + """ + + keyword = 1 + integer = 2 + double_precision = 3 + string = 4 + constant = 5 + list_defined = 6 + keystring = 7 + record = 8 + repeating_record = 9 + recarray = 10 + + +class BlockType(Enum): + 
""" + Types of blocks that can be found in a package file + """ + + single = 1 + multiple = 2 + transient = 3 + + +class MFDataItemStructure: + """ + Defines the structure of a single MF6 data item in a dfn file + + Attributes + ---------- + block_name : str + name of block that data item is in + name : str + name of data item + name_list : list + list of alternate names for the data item, includes data item's main + name "name" + python_name : str + name of data item referenced in python, with illegal python characters + removed + type : str + type of the data item as it appears in the dfn file + type_obj : python type + type of the data item as a python type + valid_values : list + list of valid values for the data item. if empty, this constraint does + not apply + data_items : list + list of data items contained in this data_item, including itself + in_record : bool + in_record attribute as appears in dfn file + tagged : bool + whether data item is tagged. if the data item is tagged its name is + included in the MF6 input file + just_data : bool + when just_data is true only data appears in the MF6 input file. + otherwise, name information appears + shape : list + describes the shape of the data + layer_dims : list + which dimensions in the shape function as layers, if None defaults to + "layer" + reader : basestring + reader that MF6 uses to read the data + optional : bool + whether data item is optional or required as part of the MFData in the + MF6 input file + longname : str + long name of the data item + description : str + description of the data item + path : tuple + a tuple describing the data item's location within the simulation + (,,,) + repeating : bool + whether or not the data item can repeat in the MF6 input file + block_variable : bool + if true, this data item is part of the block header + block_type : BlockType + whether the block containing this item is a single non-repeating block, + a multiple repeating block, or a transient repeating block + keystring_dict : dict + dictionary containing acceptable keystrings if this data item is of + type keystring + is_cellid : bool + true if this data item is definitely of type cellid + possible_cellid : bool + true if this data item may be of type cellid + ucase : bool + this data item must be displayed in upper case in the MF6 input file + + Methods + ------- + remove_cellid : (resolved_shape : list, cellid_size : int) + removes the cellid size from the shape of a data item + set_path : (path : tuple) + sets the path to this data item to path + get_rec_type : () : object type + gets the type of object of this data item to be used in a numpy + recarray + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self): + self.file_name_keywords = {"filein": False, "fileout": False} + self.file_name_key_seq = {"fname": True} + self.contained_keywords = {"fname": True, "file": True, "tdis6": True} + self.block_name = None + self.name = None + self.display_name = None + self.name_length = None + self.is_aux = False + self.is_boundname = False + self.is_mname = False + self.name_list = [] + self.python_name = None + self.type = None + self.type_string = None + self.type_obj = None + self.valid_values = [] + self.data_items = None + self.in_record = False + self.tagged = True + self.just_data = False + self.shape = [] + self.layer_dims = ["nlay"] + self.reader = None + self.optional = False + self.longname = None + self.description = "" + self.path = None + self.repeating = False + self.block_variable = 
False + self.block_type = BlockType.single + self.keystring_dict = {} + self.is_cellid = False + self.possible_cellid = False + self.ucase = False + self.preserve_case = False + self.default_value = None + self.numeric_index = False + self.support_negative_index = False + self.construct_package = None + self.construct_data = None + self.parameter_name = None + self.one_per_pkg = False + self.jagged_array = None + self.netcdf = False + + def set_value(self, line, common): + arr_line = line.strip().split() + if len(arr_line) > 1: + if arr_line[0] == "block": + self.block_name = " ".join(arr_line[1:]) + elif arr_line[0] == "name": + if self.type == DatumType.keyword: + # display keyword names in upper case + self.display_name = " ".join(arr_line[1:]).upper() + else: + self.display_name = " ".join(arr_line[1:]).lower() + self.name = " ".join(arr_line[1:]).lower() + self.name_list.append(self.name) + if len(self.name) >= 6 and self.name[0:6] == "cellid": + self.is_cellid = True + if ( + self.name + and self.name[0:2] == "id" + and self.type == DatumType.string + ): + self.possible_cellid = True + self.python_name = self.name.replace("-", "_").lower() + # don't allow name to be a python keyword + if keyword.iskeyword(self.name): + self.python_name = f"{self.python_name}_" + # performance optimizations + if self.name == "aux": + self.is_aux = True + if self.name == "boundname": + self.is_boundname = True + if self.name[0:5] == "mname": + self.is_mname = True + self.name_length = len(self.name) + elif arr_line[0] == "other_names": + arr_names = " ".join(arr_line[1:]).lower().split(",") + for name in arr_names: + self.name_list.append(name) + elif arr_line[0] == "type": + if self.support_negative_index: + # type already automatically set when + # support_negative_index flag is set + return + type_line = arr_line[1:] + if len(type_line) <= 0: + raise StructException( + 'Data structure "{}" does not have a type specified.'.format( + self.name + ), + self.path, + ) + self.type_string = type_line[0].lower() + self.type = self._str_to_enum_type(type_line[0]) + if ( + self.name + and self.name[0:2] == "id" + and self.type == DatumType.string + ): + self.possible_cellid = True + if ( + self.type == DatumType.recarray + or self.type == DatumType.record + or self.type == DatumType.repeating_record + or self.type == DatumType.keystring + ): + self.data_items = type_line[1:] + if self.type == DatumType.keystring: + for item in self.data_items: + self.keystring_dict[item.lower()] = 0 + else: + self.data_items = [self.name] + self.type_obj = self._get_type() + if self.type == DatumType.keyword: + # display keyword names in upper case + if self.display_name is not None: + self.display_name = self.display_name.upper() + elif arr_line[0] == "valid": + for value in arr_line[1:]: + self.valid_values.append(value) + elif arr_line[0] == "in_record": + self.in_record = self._get_boolean_val(arr_line) + elif arr_line[0] == "tagged": + self.tagged = self._get_boolean_val(arr_line) + elif arr_line[0] == "just_data": + self.just_data = self._get_boolean_val(arr_line) + elif arr_line[0] == "shape": + if len(arr_line) > 1: + self.shape = [] + for dimension in arr_line[1:]: + if dimension[-1] != ";": + dimension = dimension.replace("(", "") + dimension = dimension.replace(")", "") + dimension = dimension.replace(",", "") + if dimension[0] == "*": + dimension = dimension.replace("*", "") + # set as a "layer" dimension + self.layer_dims.insert(0, dimension) + self.shape.append(dimension) + else: + # only process what is after 
the last ; which by + # convention is the most generalized form of the + # shape + self.shape = [] + if len(self.shape) > 0: + self.repeating = True + elif arr_line[0] == "reader": + self.reader = " ".join(arr_line[1:]) + elif arr_line[0] == "optional": + self.optional = self._get_boolean_val(arr_line) + elif arr_line[0] == "longname": + self.longname = " ".join(arr_line[1:]) + elif arr_line[0] == "description": + if arr_line[1] == "REPLACE": + self.description = self._resolve_common(arr_line, common) + elif len(arr_line) > 1 and arr_line[1].strip(): + self.description = " ".join(arr_line[1:]) + + # clean self.description + replace_pairs = [ + ("``", '"'), # double quotes + ("''", '"'), + ("`", "'"), # single quotes + ("~", " "), # non-breaking space + (r"\mf", "MODFLOW 6"), + (r"\citep{konikow2009}", "(Konikow et al., 2009)"), + (r"\citep{hill1990preconditioned}", "(Hill, 1990)"), + (r"\ref{table:ftype}", "in mf6io.pdf"), + (r"\ref{table:gwf-obstypetable}", "in mf6io.pdf"), + ] + for s1, s2 in replace_pairs: + if s1 in self.description: + self.description = self.description.replace(s1, s2) + + # massage latex equations + self.description = self.description.replace("$<$", "<") + self.description = self.description.replace("$>$", ">") + if "$" in self.description: + descsplit = self.description.split("$") + mylist = [ + i.replace("\\", "") + ":math:`" + j.replace("\\", "\\\\") + "`" + for i, j in zip(descsplit[::2], descsplit[1::2]) + ] + mylist.append(descsplit[-1].replace("\\", "")) + self.description = "".join(mylist) + else: + self.description = self.description.replace("\\", "") + elif arr_line[0] == "block_variable": + if len(arr_line) > 1: + self.block_variable = bool(arr_line[1]) + elif arr_line[0] == "ucase": + if len(arr_line) > 1: + self.ucase = bool(arr_line[1]) + elif arr_line[0] == "preserve_case": + self.preserve_case = self._get_boolean_val(arr_line) + elif arr_line[0] == "default_value": + self.default_value = " ".join(arr_line[1:]) + elif arr_line[0] == "numeric_index": + self.numeric_index = self._get_boolean_val(arr_line) + elif arr_line[0] == "support_negative_index": + self.support_negative_index = self._get_boolean_val(arr_line) + # must be double precision to support 0 and -0 + self.type_string = "double_precision" + self.type = self._str_to_enum_type(self.type_string) + self.type_obj = self._get_type() + elif arr_line[0] == "construct_package": + self.construct_package = arr_line[1] + elif arr_line[0] == "construct_data": + self.construct_data = arr_line[1] + elif arr_line[0] == "parameter_name": + self.parameter_name = arr_line[1] + elif arr_line[0] == "one_per_pkg": + self.one_per_pkg = bool(arr_line[1]) + elif arr_line[0] == "jagged_array": + self.jagged_array = arr_line[1] + elif arr_line[0] == "netcdf": + self.netcdf = arr_line[1] + + def get_type_string(self): + return f"[{self.type_string}]" + + def get_description(self, line_size, initial_indent, level_indent): + item_desc = f"* {self.name} ({self.type_string}) {self.description}" + if self.numeric_index or self.is_cellid: + # append zero-based index text + item_desc = f"{item_desc} {numeric_index_text}" + twr = TextWrapper( + width=line_size, + initial_indent=initial_indent, + drop_whitespace=True, + subsequent_indent=f" {initial_indent}", + ) + item_desc = "\n".join(twr.wrap(item_desc)) + return item_desc + + def get_doc_string(self, line_size, initial_indent, level_indent): + description = self.get_description( + line_size, initial_indent + level_indent, level_indent + ) + param_doc_string = 
f"{self.python_name} : {self.get_type_string()}" + twr = TextWrapper( + width=line_size, + initial_indent=initial_indent, + subsequent_indent=f" {initial_indent}", + drop_whitespace=True, + ) + param_doc_string = "\n".join(twr.wrap(param_doc_string)) + param_doc_string = f"{param_doc_string}\n{description}" + return param_doc_string + + def get_keystring_desc(self, line_size, initial_indent, level_indent): + if self.type != DatumType.keystring: + raise StructException( + f'Can not get keystring description for "{self.name}" ' + "because it is not a keystring", + self.path, + ) + + # get description of keystring elements + description = "" + for key, item in self.keystring_dict.items(): + if description: + description = f"{description}\n" + description = "{}{}".format( + description, + item.get_doc_string(line_size, initial_indent, level_indent), + ) + return description + + def file_nam_in_nam_file(self): + for key, item in self.contained_keywords.items(): + if self.name.lower().find(key) != -1: + return True + + def indicates_file_name(self): + if self.name.lower() in self.file_name_keywords: + return True + for key in self.file_name_key_seq.keys(): + if key in self.name.lower(): + return True + return False + + def is_file_name(self): + if ( + self.name.lower() in self.file_name_keywords + and self.file_name_keywords[self.name.lower()] is True + ): + return True + for key, item in self.contained_keywords.items(): + if self.name.lower().find(key) != -1 and item is True: + return True + return False + + @staticmethod + def remove_cellid(resolved_shape, cellid_size): + # remove the cellid size from the shape + for dimension, index in zip(resolved_shape, range(0, len(resolved_shape))): + if dimension == cellid_size: + resolved_shape[index] = 1 + break + + @staticmethod + def _get_boolean_val(bool_option_line): + if len(bool_option_line) <= 1: + return False + if bool_option_line[1].lower() == "true": + return True + return False + + @staticmethod + def _find_close_bracket(arr_line): + for index, word in enumerate(arr_line): + word = word.strip() + if len(word) > 0 and word[-1] == "}": + return index + return None + + @staticmethod + def _resolve_common(arr_line, common): + if common is None: + return arr_line + if not (arr_line[2] in common and len(arr_line) >= 4): + raise StructException(f'Could not find line "{arr_line}" in common dfn.') + close_bracket_loc = MFDataItemStructure._find_close_bracket(arr_line[2:]) + resolved_str = common[arr_line[2]] + if close_bracket_loc is None: + find_replace_str = " ".join(arr_line[3:]) + else: + close_bracket_loc += 3 + find_replace_str = " ".join(arr_line[3:close_bracket_loc]) + find_replace_dict = ast.literal_eval(find_replace_str) + for find_str, replace_str in find_replace_dict.items(): + resolved_str = resolved_str.replace(find_str, replace_str) + # clean up formatting + resolved_str = resolved_str.replace("\\texttt", "") + resolved_str = resolved_str.replace("{", "") + resolved_str = resolved_str.replace("}", "") + + return resolved_str + + def set_path(self, path): + self.path = path + (self.name,) + mfstruct = MFStructure() + for dimension in self.shape: + dim_path = path + (dimension,) + if dim_path in mfstruct.dimension_dict: + mfstruct.dimension_dict[dim_path].append(self) + else: + mfstruct.dimension_dict[dim_path] = [self] + + def _get_type(self): + if self.type == DatumType.double_precision: + return float + elif self.type == DatumType.integer: + return int + elif self.type == DatumType.constant: + return bool + elif self.type == 
DatumType.string: + return str + elif self.type == DatumType.list_defined: + return str + return str + + def _str_to_enum_type(self, type_string): + if type_string.lower() == "keyword": + return DatumType.keyword + elif type_string.lower() == "integer": + return DatumType.integer + elif ( + type_string.lower() == "double_precision" or type_string.lower() == "double" + ): + return DatumType.double_precision + elif type_string.lower() == "string": + return DatumType.string + elif type_string.lower() == "constant": + return DatumType.constant + elif type_string.lower() == "list-defined": + return DatumType.list_defined + elif type_string.lower() == "keystring": + return DatumType.keystring + elif type_string.lower() == "record": + return DatumType.record + elif type_string.lower() == "recarray": + return DatumType.recarray + elif type_string.lower() == "repeating_record": + return DatumType.repeating_record + else: + exc_text = f'Data item type "{type_string}" not supported.' + raise StructException(exc_text, self.path) + + def get_rec_type(self): + item_type = self.type_obj + if item_type == str or self.is_cellid or self.possible_cellid: + return object + return item_type + + +class MFDataStructure: + """ + Defines the structure of a single MF6 data item in a dfn file + + Parameters + ---------- + data_item : MFDataItemStructure + base data item associated with this data structure + model_data : bool + whether or not this is part of a model + package_type : str + abbreviated package type + + Attributes + ---------- + type : str + type of the data as it appears in the dfn file + path : tuple + a tuple describing the data's location within the simulation + (,,,) + optional : bool + whether data is optional or required as part of the MFBlock in the MF6 + input file + name : str + name of data item + name_list : list + list of alternate names for the data, includes data item's main name + "name" + python_name : str + name of data referenced in python, with illegal python characters + removed + longname : str + long name of the data + repeating : bool + whether or not the data can repeat in the MF6 input file + layered : bool + whether this data can appear by layer + num_data_items : int + number of data item structures contained in this MFDataStructure, + including itself + record_within_record : bool + true if this MFDataStructure is a record within a container + MFDataStructure + file_data : bool + true if data points to a file + block_type : BlockType + whether the block containing this data is a single non-repeating block, + a multiple repeating block, or a transient repeating block + block_variable : bool + if true, this data is part of the block header + model_data : bool + if true, data is part of a model + num_optional : int + number of optional data items + parent_block : MFBlockStructure + parent block structure object + data_item_structures : list + list of data item structures contained in this MFDataStructure + expected_data_items : dict + dictionary of expected data item names for quick lookup + shape : tuple + shape of first data item + + Methods + ------- + get_keywords : () : list + returns a list of all keywords associated with this data + supports_aux : () : bool + returns true of this data supports aux variables + add_item : (item : MFDataItemStructure, record : bool) + adds a data item to this MFDataStructure + set_path : (path : tuple) + sets the path describing the data's location within the simulation + (,,,) + get_datatype : () : DataType + returns the DataType of this 
data (array, list, scalar, ...) + get_min_record_entries : () : int + gets the minimum number of entries, as entered in a package file, + for a single record. excludes optional data items + get_record_size : () : int + gets the number of data items, excluding keyword data items, in this + MFDataStructure + all_keywords : () : bool + returns true of all data items are keywords + get_type_string : () : str + returns descriptive string of the data types in this MFDataStructure + get_description : () : str + returns a description of the data + get_type_array : (type_array : list): + builds an array of data type information in type_array + get_datum_type : (numpy_type : bool): + returns the object type of the first data item in this MFDataStructure + with a standard type. if numpy_type is true returns the type as a + numpy type + get_data_item_types: () : list + returns a list of object type for every data item in this + MFDataStructure + first_non_keyword_index : () : int + return the index of the first data item in this MFDataStructure that is + not a keyword + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self, data_item, model_data, package_type, dfn_list): + self.type = data_item.type + self.package_type = package_type + self.path = None + self.optional = data_item.optional + self.name = data_item.name + self.block_name = data_item.block_name + self.name_length = len(self.name) + self.is_aux = data_item.is_aux + self.is_boundname = data_item.is_boundname + self.name_list = data_item.name_list + self.python_name = data_item.python_name + self.longname = data_item.longname + self.default_value = data_item.default_value + self.repeating = False + self.layered = ( + "nlay" in data_item.shape + or "nodes" in data_item.shape + or len(data_item.layer_dims) > 1 + ) + self.netcdf = data_item.netcdf + self.num_data_items = len(data_item.data_items) + self.record_within_record = False + self.file_data = False + self.nam_file_data = False + self.block_type = data_item.block_type + self.block_variable = data_item.block_variable + self.model_data = model_data + self.num_optional = 0 + self.parent_block = None + self._fpmerge_data_item(data_item, dfn_list) + self.construct_package = data_item.construct_package + self.construct_data = data_item.construct_data + self.parameter_name = data_item.parameter_name + self.one_per_pkg = data_item.one_per_pkg + + self.data_item_structures = [] + self.expected_data_items = {} + self.shape = data_item.shape + if ( + self.type == DatumType.recarray + or self.type == DatumType.record + or self.type == DatumType.repeating_record + ): + # record expected data for later error checking + for data_item_name in data_item.data_items: + self.expected_data_items[data_item_name] = len(self.expected_data_items) + else: + self.expected_data_items[data_item.name] = len(self.expected_data_items) + + @property + def basic_item(self): + if not self.parent_block.parent_package.stress_package: + return False + for item in self.data_item_structures: + if ( + ( + (item.repeating or item.optional) + and not (item.is_cellid or item.is_aux or item.is_boundname) + ) + or item.jagged_array is not None + or item.type == DatumType.keystring + or item.type == DatumType.keyword + or ( + item.description is not None + and "keyword `NONE'" in item.description + ) + ): + return False + return True + + @property + def is_mname(self): + for item in self.data_item_structures: + if item.is_mname: + return True + return False + + def get_item(self, item_name): + 
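+        # case-insensitive lookup of a contained data item structure by
+        # name (returns None when no item matches)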
for item in self.data_item_structures: + if item.name.lower() == item_name.lower(): + return item + return None + + def get_keywords(self): + keywords = [] + if ( + self.type == DatumType.recarray + or self.type == DatumType.record + or self.type == DatumType.repeating_record + ): + for data_item_struct in self.data_item_structures: + if data_item_struct.type == DatumType.keyword: + if len(keywords) == 0: + # create first keyword tuple + for name in data_item_struct.name_list: + keywords.append((name,)) + else: + # update all keyword tuples with latest keyword found + new_keywords = [] + for keyword_tuple in keywords: + for name in data_item_struct.name_list: + new_keywords.append(keyword_tuple + (name,)) + if data_item_struct.optional: + keywords = keywords + new_keywords + else: + keywords = new_keywords + elif data_item_struct.type == DatumType.keystring: + for keyword_item in data_item_struct.data_items: + keywords.append((keyword_item,)) + elif len(keywords) == 0: + if len(data_item_struct.valid_values) > 0: + new_keywords = [] + # loop through all valid values and append to the end + # of each keyword tuple + for valid_value in data_item_struct.valid_values: + if len(keywords) == 0: + new_keywords.append((valid_value,)) + else: + for keyword_tuple in keywords: + new_keywords.append(keyword_tuple + (valid_value,)) + keywords = new_keywords + else: + for name in data_item_struct.name_list: + keywords.append((name,)) + else: + for name in self.name_list: + keywords.append((name,)) + return keywords + + def supports_aux(self): + for data_item_struct in self.data_item_structures: + if data_item_struct.name.lower() == "aux": + return True + return False + + def add_item(self, item, record=False, dfn_list=None): + item_added = False + if item.type != DatumType.recarray and ( + (item.type != DatumType.record and item.type != DatumType.repeating_record) + or record is True + ): + if item.name not in self.expected_data_items: + raise StructException( + 'Could not find data item "{}" in ' + "expected data items of data structure " + "{}.".format(item.name, self.name), + self.path, + ) + item.set_path(self.path) + if len(self.data_item_structures) == 0: + self.keyword = item.name + # insert data item into correct location in array + location = self.expected_data_items[item.name] + if len(self.data_item_structures) > location: + # TODO: ask about this condition and remove + if self.data_item_structures[location] is None: + # verify that this is not a placeholder value + if self.data_item_structures[location] is not None: + raise StructException( + 'Data structure "{}" already ' + 'has the item named "{}"' + ".".format(self.name, item.name), + self.path, + ) + if isinstance(item, MFDataItemStructure): + self.nam_file_data = ( + self.nam_file_data or item.file_nam_in_nam_file() + ) + self.file_data = self.file_data or item.indicates_file_name() + # replace placeholder value + self.data_item_structures[location] = item + item_added = True + else: + for index in range(0, location - len(self.data_item_structures)): + # insert placeholder in array + self.data_item_structures.append(None) + if isinstance(item, MFDataItemStructure): + self.nam_file_data = ( + self.nam_file_data or item.file_nam_in_nam_file() + ) + self.file_data = self.file_data or item.indicates_file_name() + self.data_item_structures.append(item) + item_added = True + self.optional = self.optional and item.optional + if item.optional: + self.num_optional += 1 + if item_added: + self._fpmerge_data_item(item, dfn_list) + return 
item_added + + def _fpmerge_data_item(self, item, dfn_list): + mfstruct = MFStructure() + # check for flopy-specific dfn data + if item.name.lower() in mfstruct.flopy_dict: + # read flopy-specific dfn data + for name, value in mfstruct.flopy_dict[item.name.lower()].items(): + line = f"{name} {value}" + item.set_value(line, None) + if dfn_list is not None: + dfn_list[-1].append(line) + + def set_path(self, path): + self.path = path + (self.name,) + + def get_datatype(self): + if self.type == DatumType.recarray: + if self.block_type != BlockType.single and not self.block_variable: + if self.block_type == BlockType.transient: + return DataType.list_transient + else: + return DataType.list_multiple + else: + return DataType.list + if self.type == DatumType.record or self.type == DatumType.repeating_record: + record_size, repeating_data_item = self.get_record_size() + if (record_size >= 1 and not self.all_keywords()) or repeating_data_item: + if self.block_type != BlockType.single and not self.block_variable: + if self.block_type == BlockType.transient: + return DataType.list_transient + else: + return DataType.list_multiple + else: + return DataType.list + else: + if self.block_type != BlockType.single and not self.block_variable: + return DataType.scalar_transient + else: + return DataType.scalar + elif ( + len(self.data_item_structures) > 0 + and self.data_item_structures[0].repeating + ): + if self.data_item_structures[0].type == DatumType.string: + return DataType.list + else: + if self.block_type == BlockType.single: + return DataType.array + else: + return DataType.array_transient + elif ( + len(self.data_item_structures) > 0 + and self.data_item_structures[0].type == DatumType.keyword + ): + if self.block_type != BlockType.single and not self.block_variable: + return DataType.scalar_keyword_transient + else: + return DataType.scalar_keyword + else: + if self.block_type != BlockType.single and not self.block_variable: + return DataType.scalar_transient + else: + return DataType.scalar + + def is_mult_or_trans(self): + data_type = self.get_datatype() + if ( + data_type == DataType.scalar_keyword_transient + or data_type == DataType.array_transient + or data_type == DataType.list_transient + or data_type == DataType.list_multiple + ): + return True + return False + + def get_min_record_entries(self): + count = 0 + for data_item_structure in self.data_item_structures: + if not data_item_structure.optional: + if data_item_structure.type == DatumType.record: + count += data_item_structure.get_record_size()[0] + else: + if data_item_structure.type != DatumType.keyword: + count += 1 + return count + + def get_record_size(self): + count = 0 + repeating = False + for data_item_structure in self.data_item_structures: + if data_item_structure.type == DatumType.record: + count += data_item_structure.get_record_size()[0] + else: + if data_item_structure.type != DatumType.keyword or count > 0: + if data_item_structure.repeating: + # count repeats as one extra record + repeating = True + count += 1 + return count, repeating + + def all_keywords(self): + for data_item_structure in self.data_item_structures: + if data_item_structure.type == DatumType.record: + if not data_item_structure.all_keywords(): + return False + else: + if data_item_structure.type != DatumType.keyword: + return False + return True + + def get_type_string(self): + type_array = [] + self.get_docstring_type_array(type_array) + type_string = ", ".join(type_array) + type_header = "" + type_footer = "" + if 
len(self.data_item_structures) > 1 or self.data_item_structures[0].repeating: + type_header = "[" + type_footer = "]" + if self.repeating: + type_footer = f"] ... [{type_string}]" + + return f"{type_header}{type_string}{type_footer}" + + def get_docstring_type_array(self, type_array): + for index, item in enumerate(self.data_item_structures): + if item.type == DatumType.record: + item.get_docstring_type_array(type_array) + else: + if self.display_item(index): + if ( + self.type == DatumType.recarray + or self.type == DatumType.record + or self.type == DatumType.repeating_record + ): + type_array.append(str(item.name)) + else: + type_array.append(str(self._resolve_item_type(item))) + + def get_description( + self, line_size=79, initial_indent=" ", level_indent=" " + ): + type_array = [] + self.get_type_array(type_array) + description = "" + for datastr, index, itype in type_array: + item = datastr.data_item_structures[index] + if item is None: + continue + if item.type == DatumType.record: + item_desc = item.get_description( + line_size, initial_indent + level_indent, level_indent + ) + description = f"{description}\n{item_desc}" + elif datastr.display_item(index): + if len(description.strip()) > 0: + description = f"{description}\n" + item_desc = item.description + if item.numeric_index or item.is_cellid: + # append zero-based index text + item_desc = f"{item_desc} {numeric_index_text}" + + item_desc = f"* {item.name} ({itype}) {item_desc}" + twr = TextWrapper( + width=line_size, + initial_indent=initial_indent, + subsequent_indent=f" {initial_indent}", + ) + item_desc = "\n".join(twr.wrap(item_desc)) + description = f"{description}{item_desc}" + if item.type == DatumType.keystring: + keystr_desc = item.get_keystring_desc( + line_size, initial_indent + level_indent, level_indent + ) + description = f"{description}\n{keystr_desc}" + return description + + def get_subpackage_description( + self, line_size=79, initial_indent=" ", level_indent=" " + ): + item_desc = ( + "* Contains data for the {} package. Data can be " + "stored in a dictionary containing data for the {} " + "package with variable names as keys and package data as " + "values. Data just for the {} variable is also " + "acceptable. 
See {} package documentation for more " + "information" + ".".format( + self.construct_package, + self.construct_package, + self.parameter_name, + self.construct_package, + ) + ) + twr = TextWrapper( + width=line_size, + initial_indent=initial_indent, + subsequent_indent=f" {initial_indent}", + ) + return "\n".join(twr.wrap(item_desc)) + + def get_doc_string(self, line_size=79, initial_indent=" ", level_indent=" "): + if self.parameter_name is not None: + description = self.get_subpackage_description( + line_size, initial_indent + level_indent, level_indent + ) + var_name = self.parameter_name + type_name = f"{{varname:data}} or {self.construct_data} data" + else: + description = self.get_description( + line_size, initial_indent + level_indent, level_indent + ) + var_name = self.python_name + type_name = self.get_type_string() + + param_doc_string = f"{var_name} : {type_name}" + twr = TextWrapper( + width=line_size, + initial_indent=initial_indent, + subsequent_indent=f" {initial_indent}", + ) + param_doc_string = "\n".join(twr.wrap(param_doc_string)) + param_doc_string = f"{param_doc_string}\n{description}" + return param_doc_string + + def get_type_array(self, type_array): + for index, item in enumerate(self.data_item_structures): + if item.type == DatumType.record: + item.get_type_array(type_array) + else: + if self.display_item(index): + type_array.append( + ( + self, + index, + str(self._resolve_item_type(item)), + ) + ) + + def _resolve_item_type(self, item): + item_type = item.type_string + first_nk_idx = self.first_non_keyword_index() + # single keyword is type boolean + if item_type == "keyword" and len(self.data_item_structures) == 1: + item_type = "boolean" + if item.is_cellid: + item_type = "(integer, ...)" + # two keywords + if len(self.data_item_structures) == 2 and first_nk_idx is None: + # keyword type is string + item_type = "string" + return item_type + + def display_item(self, item_num): + item = self.data_item_structures[item_num] + first_nk_idx = self.first_non_keyword_index() + # all keywords excluded if there is a non-keyword + if not (item.type == DatumType.keyword and first_nk_idx is not None): + # ignore first keyword if there are two keywords + if ( + len(self.data_item_structures) == 2 + and first_nk_idx is None + and item_num == 0 + ): + return False + return True + return False + + def get_datum_type(self, numpy_type=False, return_enum_type=False): + data_item_types = self.get_data_item_types() + for var_type in data_item_types: + if ( + var_type[0] == DatumType.double_precision + or var_type[0] == DatumType.integer + or var_type[0] == DatumType.string + ): + if return_enum_type: + return var_type[0] + else: + if numpy_type: + if var_type[0] == DatumType.double_precision: + return np.float64 + elif var_type[0] == DatumType.integer: + return np.int32 + else: + return object + else: + return var_type[2] + return None + + def get_data_item_types(self): + data_item_types = [] + for data_item in self.data_item_structures: + if data_item.type == DatumType.record: + # record within a record + data_item_types += data_item.get_data_item_types() + else: + data_item_types.append( + [data_item.type, data_item.type_string, data_item.type_obj] + ) + return data_item_types + + def first_non_keyword_index(self): + for data_item, index in zip( + self.data_item_structures, range(0, len(self.data_item_structures)) + ): + if data_item.type != DatumType.keyword: + return index + return None + + def get_model(self): + if self.model_data: + if len(self.path) >= 1: + return 
self.path[0]
+        return None
+
+    def get_package(self):
+        if self.model_data:
+            if len(self.path) >= 2:
+                return self.path[1]
+        else:
+            if len(self.path) >= 1:
+                return self.path[0]
+        return ""
+
+
+class MFBlockStructure:
+    """
+    Defines the structure of a MF6 block.
+
+    Parameters
+    ----------
+    name : string
+        block name
+    path : tuple
+        tuple that describes location of block within simulation
+        (<model>, <package>, <block>)
+    model_block : bool
+        true if this block is part of a model
+
+    Attributes
+    ----------
+    name : string
+        block name
+    path : tuple
+        tuple that describes location of block within simulation
+        (<model>, <package>, <block>)
+    model_block : bool
+        true if this block is part of a model
+    data_structures : dict
+        dictionary of data items in this block, with the data item name as
+        the key
+    block_header_structure : list
+        list of data items that are part of this block's "header"
+
+    Methods
+    -------
+    repeating() : bool
+        Returns true if more than one instance of this block can appear in a
+        MF6 package file
+    add_dataset(dataset : MFDataStructure)
+        Adds dataset to this block; datasets flagged as block variables are
+        added to the block header structure
+    number_non_optional_data() : int
+        Returns the number of non-optional non-header data structures in
+        this block
+    number_non_optional_block_header_data() : int
+        Returns the number of non-optional block header data structures in
+        this block
+    get_data_structure(path : tuple) : MFDataStructure
+        Returns the data structure in this block with name defined by path[0].
+        If name does not exist, returns None.
+    get_all_recarrays() : list
+        Returns all non-header data structures in this block that are of
+        type recarray
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, name, path, model_block, parent_package):
+        # initialize
+        self.data_structures = {}
+        self.block_header_structure = []
+        self.name = name
+        self.path = path + (self.name,)
+        self.model_block = model_block
+        self.parent_package = parent_package
+
+    def repeating(self):
+        if len(self.block_header_structure) > 0:
+            return True
+        return False
+
+    def add_dataset(self, dataset):
+        dataset.set_path(self.path)
+        if dataset.block_variable:
+            self.block_header_structure.append(dataset)
+        else:
+            self.data_structures[dataset.name] = dataset
+
+    def number_non_optional_data(self):
+        num = 0
+        for key, data_structure in self.data_structures.items():
+            if not data_structure.optional:
+                num += 1
+        return num
+
+    def number_non_optional_block_header_data(self):
+        if (
+            len(self.block_header_structure) > 0
+            and not self.block_header_structure[0].optional
+        ):
+            return 1
+        else:
+            return 0
+
+    def get_data_structure(self, path):
+        if path[0] in self.data_structures:
+            return self.data_structures[path[0]]
+        else:
+            return None
+
+    def get_all_recarrays(self):
+        recarray_list = []
+        for ds_key, item in self.data_structures.items():
+            if item.type == DatumType.recarray:
+                recarray_list.append(item)
+        return recarray_list
+
+
+class MFInputFileStructure:
+    """
+    MODFLOW Input File Structure class. Loads file structure information
+    for individual input file types.
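+
+    A minimal usage sketch (hypothetical; assumes ``dfn`` is a loaded
+    ``DfnFile`` for the GWF DIS package and ``common`` holds the parsed
+    common dfn data):
+
+    >>> file_struct = MFInputFileStructure(dfn, ("gwf6",), common, True)
+    >>> delr_struct = file_struct.get_data_structure(("griddata", "delr"))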
+
+    Parameters
+    ----------
+    dfn_file : string
+        the definition file used to define the structure of this input file
+    path : tuple
+        path defining the location of the container of this input file
+        structure within the overall simulation structure
+    common : dict or bool
+        contents of the common dfn file, or False if not applicable
+    model_file : bool
+        this file belongs to a specific model type
+
+    Attributes
+    ----------
+    valid : bool
+        simulation structure validity
+    path : tuple
+        path defining the location of this input file structure within the
+        overall simulation structure
+    read_as_arrays : bool
+        if this input file structure is the READASARRAYS version of a package
+
+    Methods
+    -------
+    is_valid() : bool
+        Checks all structure objects within the file for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, dfn_file, path, common, model_file):
+        # initialize
+        self.file_type = dfn_file.package_type
+        self.file_prefix = dfn_file.package_prefix
+        self.dfn_type = dfn_file.dfn_type
+        self.dfn_file_name = dfn_file.dfn_file_name
+        self.description = ""
+        self.path = path + (self.file_type,)
+        self.model_file = model_file  # file belongs to a specific model
+        self.read_as_arrays = False
+
+        self.blocks, self.header = dfn_file.get_block_structure_dict(
+            self.path,
+            common,
+            model_file,
+            self,
+        )
+        self.has_packagedata = "packagedata" in self.blocks
+        self.has_perioddata = "period" in self.blocks
+        self.multi_package_support = "multi-package" in self.header
+        self.stress_package = (
+            "package-type" in self.header
+            and self.header["package-type"] == "stress-package"
+        )
+        self.advanced_stress_package = (
+            "package-type" in self.header
+            and self.header["package-type"] == "advanced-stress-package"
+        )
+        self.dfn_list = dfn_file.dfn_list
+        self.sub_package = self._sub_package()
+
+    def advanced_package(self):
+        return self.has_packagedata and self.has_perioddata
+
+    def _sub_package(self):
+        mfstruct = MFStructure()
+        for value in mfstruct.flopy_dict.values():
+            if value is not None and "construct_package" in value:
+                if self.file_type == value["construct_package"]:
+                    return True
+        return False
+
+    def is_valid(self):
+        valid = True
+        # self.blocks is a dict of MFBlockStructure objects keyed by name
+        for block in self.blocks.values():
+            valid = valid and block.is_valid()
+        return valid
+
+    def get_data_structure(self, path):
+        if isinstance(path, tuple) or isinstance(path, list):
+            if path[0] in self.blocks:
+                return self.blocks[path[0]].get_data_structure(path[1:])
+            else:
+                return None
+        else:
+            # search each block for a data structure with the given name
+            for block in self.blocks.values():
+                if path in block.data_structures:
+                    return block.data_structures[path]
+            return None
+
+
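+# Structure class containment, for orientation: MFSimulationStructure
+# holds MFModelStructure and MFInputFileStructure objects;
+# MFModelStructure holds MFInputFileStructure objects; each
+# MFInputFileStructure holds MFBlockStructure blocks, which hold
+# MFDataStructure objects built from MFDataItemStructure items.
+
+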
+class MFModelStructure:
+    """
+    Defines the structure of a MF6 model and its packages
+
+    Parameters
+    ----------
+    model_type : string
+        abbreviation of model type
+
+    Attributes
+    ----------
+    valid : bool
+        model structure validity
+    name_file_struct_obj : MFInputFileStructure
+        describes the structure of the model name file
+    package_struct_objs : dict
+        describes the structure of the model's packages
+    model_type : string
+        abbreviation of model type
+
+    Methods
+    -------
+    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
+        Adds a namefile structure object to the model
+    add_package(dfn_file : DfnFile, model_file=True : bool)
+        Adds a package structure object to the model
+    is_valid() : bool
+        Checks all structure objects within the model for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+    """
+
+    def __init__(self, model_type, utl_struct_objs):
+        # add name file structure
+        self.model_type = model_type
+        self.name_file_struct_obj = None
+        self.package_struct_objs = {}
+        self.utl_struct_objs = utl_struct_objs
+
+    def add_namefile(self, dfn_file, common):
+        self.name_file_struct_obj = MFInputFileStructure(
+            dfn_file, (self.model_type,), common, True
+        )
+
+    def add_package(self, dfn_file, common):
+        self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure(
+            dfn_file, (self.model_type,), common, True
+        )
+
+    def get_package_struct(self, package_type):
+        if package_type in self.package_struct_objs:
+            return self.package_struct_objs[package_type]
+        elif package_type in self.utl_struct_objs:
+            return self.utl_struct_objs[package_type]
+        else:
+            return None
+
+    def is_valid(self):
+        valid = True
+        # package_struct_objs is a dict of MFInputFileStructure objects
+        for package_struct in self.package_struct_objs.values():
+            valid = valid and package_struct.is_valid()
+        return valid
+
+    def get_data_structure(self, path):
+        if path[0] in self.package_struct_objs:
+            if len(path) > 1:
+                return self.package_struct_objs[path[0]].get_data_structure(path[1:])
+            else:
+                return self.package_struct_objs[path[0]]
+        elif path[0] == "nam":
+            if len(path) > 1:
+                return self.name_file_struct_obj.get_data_structure(path[1:])
+            else:
+                return self.name_file_struct_obj
+        else:
+            return None
+
+
+class MFSimulationStructure:
+    """
+    Defines the structure of a MF6 simulation and its packages
+    and models.
+
+    Parameters
+    ----------
+
+    Attributes
+    ----------
+    name_file_struct_obj : MFInputFileStructure
+        describes the structure of the simulation name file
+    package_struct_objs : dict
+        describes the structure of the simulation packages
+    model_struct_objs : dict
+        describes the structure of the supported model types
+    utl_struct_objs : dict
+        describes the structure of the supported utility packages
+    common : dict
+        common file information
+    model_type : string
+        placeholder
+
+    Methods
+    -------
+    process_dfn : (dfn_file : DfnFile)
+        reads in the contents of a dfn file, storing the contents in the
+        appropriate object
+    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
+        Adds a namefile structure object to the simulation
+    add_util : (dfn_file : DfnFile)
+        Adds a utility package structure object to the simulation
+    add_package(dfn_file : DfnFile, model_file=True : bool)
+        Adds a package structure object to the simulation
+    store_common(dfn_file : DfnFile)
+        Stores the contents of the common dfn file
+    add_model(model_type : string)
+        Adds a model structure object to the simulation
+    is_valid() : bool
+        Checks all structure objects within the simulation for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+    tag_read_as_arrays
+        Searches through all packages and tags any packages with a name that
+        indicates they are the READASARRAYS version of a package.
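+
+    A minimal usage sketch (hypothetical; assumes ``dfn_files`` is an
+    iterable of loaded ``DfnFile`` objects that includes the common dfn
+    file):
+
+    >>> sim_struct = MFSimulationStructure()
+    >>> for dfn_file in dfn_files:
+    ...     sim_struct.process_dfn(dfn_file)
+    >>> sim_struct.tag_read_as_arrays()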
+ + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self): + # initialize + self.name_file_struct_obj = None + self.package_struct_objs = {} + self.utl_struct_objs = {} + self.model_struct_objs = {} + self.common = None + self.model_type = "" + + @property + def model_types(self): + model_type_list = [] + for model in self.model_struct_objs.values(): + model_type_list.append(model.model_type[:-1]) + return model_type_list + + def process_dfn(self, dfn_file): + if dfn_file.dfn_type == DfnType.common: + self.store_common(dfn_file) + elif dfn_file.dfn_type == DfnType.sim_name_file: + self.add_namefile(dfn_file, False) + elif ( + dfn_file.dfn_type == DfnType.sim_tdis_file + or dfn_file.dfn_type == DfnType.exch_file + or dfn_file.dfn_type == DfnType.ims_file + ): + self.add_package(dfn_file, False) + elif dfn_file.dfn_type == DfnType.utl: + self.add_util(dfn_file) + elif ( + dfn_file.dfn_type == DfnType.model_file + or dfn_file.dfn_type == DfnType.model_name_file + or dfn_file.dfn_type == DfnType.gnc_file + or dfn_file.dfn_type == DfnType.mvr_file + or dfn_file.dfn_type == DfnType.mvt_file + ): + model_ver = f"{dfn_file.model_type}{MFStructure().get_version_string()}" + if model_ver not in self.model_struct_objs: + self.add_model(model_ver) + if dfn_file.dfn_type == DfnType.model_file: + self.model_struct_objs[model_ver].add_package(dfn_file, self.common) + elif ( + dfn_file.dfn_type == DfnType.gnc_file + or dfn_file.dfn_type == DfnType.mvr_file + or dfn_file.dfn_type == DfnType.mvt_file + ): + # gnc and mvr files belong both on the simulation and model + # level + self.model_struct_objs[model_ver].add_package(dfn_file, self.common) + self.add_package(dfn_file, False) + else: + self.model_struct_objs[model_ver].add_namefile(dfn_file, self.common) + + def add_namefile(self, dfn_file, model_file=True): + self.name_file_struct_obj = MFInputFileStructure( + dfn_file, (), self.common, model_file + ) + + def add_util(self, dfn_file): + self.utl_struct_objs[dfn_file.package_type] = MFInputFileStructure( + dfn_file, (), self.common, True + ) + + def add_package(self, dfn_file, model_file=True): + self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure( + dfn_file, (), self.common, model_file + ) + + def store_common(self, dfn_file): + # store common stuff + self.common = dfn_file.dict_by_name() + + def add_model(self, model_type): + self.model_struct_objs[model_type] = MFModelStructure( + model_type, self.utl_struct_objs + ) + + def is_valid(self): + valid = True + for package_struct in self.package_struct_objs: + valid = valid and package_struct.is_valid() + for model_struct in self.model_struct_objs: + valid = valid and model_struct.is_valid() + return valid + + def get_data_structure(self, path): + if path[0] in self.package_struct_objs: + if len(path) > 1: + return self.package_struct_objs[path[0]].get_data_structure(path[1:]) + else: + return self.package_struct_objs[path[0]] + elif path[0] in self.model_struct_objs: + if len(path) > 1: + return self.model_struct_objs[path[0]].get_data_structure(path[1:]) + else: + return self.model_struct_objs[path[0]] + elif path[0] in self.utl_struct_objs: + if len(path) > 1: + return self.utl_struct_objs[path[0]].get_data_structure(path[1:]) + else: + return self.utl_struct_objs[path[0]] + elif path[0] == "nam": + if len(path) > 1: + return self.name_file_struct_obj.get_data_structure(path[1:]) + else: + return self.name_file_struct_obj + else: + return None + + def tag_read_as_arrays(self): + for key, 
package_struct in self.package_struct_objs.items(): + if ( + package_struct.get_data_structure(("options", "readasarrays")) + or package_struct.get_data_structure(("options", "readarraylayer")) + or package_struct.get_data_structure(("options", "readarraygrid")) + ): + package_struct.read_as_arrays = True + for model_key, model_struct in self.model_struct_objs.items(): + for ( + key, + package_struct, + ) in model_struct.package_struct_objs.items(): + if ( + package_struct.get_data_structure(("options", "readasarrays")) + or package_struct.get_data_structure(("options", "readarraylayer")) + or package_struct.get_data_structure(("options", "readarraygrid")) + ): + package_struct.read_as_arrays = True + + +class MFStructure: + """ + Singleton class for accessing the contents of the json structure file + (only one instance of this class can exist, which loads the json file on + initialization) + + Parameters + ---------- + mf_version : int + version of MODFLOW + valid : bool + whether the structure information loaded from the dfn files is valid + sim_struct : MFSimulationStructure + Object containing file structure for all simulation files + dimension_dict : dict + Dictionary mapping paths to dimension information to the dataitem whose + dimension information is being described + """ + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super().__new__(cls) + + # Initialize variables + cls._instance.mf_version = 6 + cls._instance.sim_struct = None + cls._instance.dimension_dict = {} + cls._instance.flopy_dict = {} + + # Read metadata from file + cls._instance.valid = cls._instance._load_structure() + + return cls._instance + + def get_version_string(self): + return format(str(self.mf_version)) + + def _load_structure(self): + # set up structure classes + self.sim_struct = MFSimulationStructure() + + # initialize flopy dict keys + MFStructure().flopy_dict["solution_packages"] = {} + + from ..mfpackage import MFPackage + + for package in MFPackage.__subclasses__(): + # process header + for entry in package.dfn[0][1:]: + if isinstance(entry, list) and entry[0] == "solution_package": + MFStructure().flopy_dict["solution_packages"][ + package.package_abbr + ] = entry[1:] + # process each package + self.sim_struct.process_dfn(DfnPackage(package)) + self.sim_struct.tag_read_as_arrays() + + return True diff --git a/flopy/mf6/tmp/ruff/2/mfmodel.py b/flopy/mf6/tmp/ruff/2/mfmodel.py new file mode 100644 index 0000000000..45c31b722b --- /dev/null +++ b/flopy/mf6/tmp/ruff/2/mfmodel.py @@ -0,0 +1,2256 @@ +import inspect +import os +import sys +import warnings +from typing import Optional, Union + +import numpy as np + +from ..discretization.grid import Grid +from ..discretization.modeltime import ModelTime +from ..discretization.structuredgrid import StructuredGrid +from ..discretization.unstructuredgrid import UnstructuredGrid +from ..discretization.vertexgrid import VertexGrid +from ..mbase import ModelInterface +from ..utils import datautil +from ..utils.check import mf6check +from .coordinates import modeldimensions +from .data import mfdata, mfdatalist, mfstructure +from .data.mfdatautil import DataSearchOutput, iterable +from .mfbase import ( + ExtFileAction, + FlopyException, + MFDataException, + MFFileMgmt, + PackageContainer, + PackageContainerType, + ReadAsArraysException, + VerbosityLevel, +) +from .mfpackage import MFPackage +from .utils.mfenums import DiscretizationType +from .utils.output_util import MF6Output + + +class MFModel(ModelInterface): + """ + 
MODFLOW-6 model base class. Represents a single model in a simulation. + + Parameters + ---------- + simulation_data : MFSimulationData + Simulation data object of the simulation this model will belong to + structure : MFModelStructure + Structure of this type of model + modelname : str + Name of the model + model_nam_file : str + Relative path to the model name file from model working folder + version : str + Version of modflow + exe_name : str + Model executable name + model_ws : str + Model working folder path + disfile : str + Relative path to dis file from model working folder + grid_type : str + Type of grid the model will use (structured, unstructured, vertices) + verbose : bool + Verbose setting for model operations (default False) + + Attributes + ---------- + name : str + Name of the model + exe_name : str + Model executable name + packages : dict of MFPackage + Dictionary of model packages + + """ + + def __init__( + self, + simulation, + model_type="gwf6", + modelname="model", + model_nam_file=None, + version="mf6", + exe_name="mf6", + add_to_simulation=True, + structure=None, + model_rel_path=".", + verbose=False, + **kwargs, + ): + self._package_container = PackageContainer(simulation.simulation_data) + self.simulation = simulation + self.simulation_data = simulation.simulation_data + self.name = modelname + self.name_file = None + self._version = version + self.model_type = model_type + self.type = "Model" + + if model_nam_file is None: + model_nam_file = f"{modelname}.nam" + + if add_to_simulation: + self.structure = simulation.register_model( + self, model_type, modelname, model_nam_file + ) + else: + self.structure = structure + self.set_model_relative_path(model_rel_path) + self.exe_name = exe_name + self.dimensions = modeldimensions.ModelDimensions( + self.name, self.simulation_data + ) + self.simulation_data.model_dimensions[modelname] = self.dimensions + self._ftype_num_dict = {} + self._package_paths = {} + self._verbose = verbose + + if model_nam_file is None: + self.model_nam_file = f"{modelname}.nam" + else: + self.model_nam_file = model_nam_file + + # check for spatial reference info in kwargs + xll = kwargs.pop("xll", None) + yll = kwargs.pop("yll", None) + self._xul = kwargs.pop("xul", None) + self._yul = kwargs.pop("yul", None) + rotation = kwargs.pop("rotation", 0.0) + crs = kwargs.pop("crs", None) + # build model grid object + self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation) + + self.start_datetime = None + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = ( + f'Extraneous kwargs "{kwargs_str}" provided to MFModel.' + ) + raise FlopyException(excpt_str) + + # build model name file + # create name file based on model type - support different model types + package_obj = PackageContainer.package_factory("nam", model_type[0:3]) + if not package_obj: + excpt_str = ( + f"Name file could not be found for model{model_type[0:3]}." 
+ ) + raise FlopyException(excpt_str) + + self.name_file = package_obj( + self, + filename=self.model_nam_file, + pname=self.name, + _internal_package=True, + ) + + def __init_subclass__(cls): + """Register model type""" + super().__init_subclass__() + PackageContainer.modflow_models.append(cls) + PackageContainer.models_by_type[cls.model_type] = cls + + def __getattr__(self, item): + """ + __getattr__ - used to allow for getting packages as if they are + attributes + + Parameters + ---------- + item : str + 3 character package name (case insensitive) + + + Returns + ------- + pp : Package object + Package object of type :class:`flopy.pakbase.Package` + + """ + if item == "name_file" or not hasattr(self, "name_file"): + raise AttributeError(item) + + package = self.get_package(item) + if package is not None: + return package + raise AttributeError(item) + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.name, + package="", + ) + return + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + file_mgr = self.simulation_data.mfpath + data_str = ( + "name = {}\nmodel_type = {}\nversion = {}\nmodel_" + "relative_path = {}" + "\n\n".format( + self.name, + self.model_type, + self.version, + file_mgr.model_relative_path[self.name], + ) + ) + + for package in self.packagelist: + pk_str = package._get_data_str(formal, False) + if formal: + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + else: + pk_str = package._get_data_str(formal, False) + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + return data_str + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_dict(self): + """Returns a copy of the package name dictionary. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. 
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + @property + def nper(self): + """Number of stress periods. + + Returns + ------- + nper : int + Number of stress periods in the simulation. + + """ + try: + return self.simulation.tdis.nper.array + except AttributeError: + return None + + @property + def modeltime(self): + """Model time discretization information. + + Returns + ------- + modeltime : ModelTime + FloPy object containing time discretization information for the + simulation. + + """ + tdis = self.simulation.get_package("tdis", type_only=True) + period_data = tdis.perioddata.get_data() + + # build steady state data + sto = self.get_package("sto", type_only=True) + if sto is None: + steady = np.full((len(period_data["perlen"])), True, dtype=bool) + else: + steady = np.full((len(period_data["perlen"])), False, dtype=bool) + ss_periods = sto.steady_state.get_active_key_dict() + for period, val in ss_periods.items(): + if val: + ss_periods[period] = sto.steady_state.get_data(period) + tr_periods = sto.transient.get_active_key_dict() + for period, val in tr_periods.items(): + if val: + tr_periods[period] = sto.transient.get_data(period) + if ss_periods: + last_ss_value = False + # loop through steady state array + for index, value in enumerate(steady): + # resolve if current index is steady state or transient + if index in ss_periods and ss_periods[index]: + last_ss_value = True + elif index in tr_periods and tr_periods[index]: + last_ss_value = False + if last_ss_value is True: + steady[index] = True + + # build model time + itmuni = tdis.time_units.get_data() + start_date_time = tdis.start_date_time.get_data() + + self._model_time = ModelTime( + perlen=period_data["perlen"], + nstp=period_data["nstp"], + tsmult=period_data["tsmult"], + time_units=itmuni, + start_datetime=start_date_time, + steady_state=steady + ) + return self._model_time + + @property + def modeldiscrit(self): + """Basic model spatial discretization information. This is used + internally prior to model spatial discretization information being + fully loaded. + + Returns + ------- + model grid : Grid subclass + FloPy object containing basic spatial discretization information + for the model. 
+ + """ + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + return StructuredGrid( + nlay=dis.nlay.get_data(), + nrow=dis.nrow.get_data(), + ncol=dis.ncol.get_data(), + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + return VertexGrid( + ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data() + ) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + nodes = dis.nodes.get_data() + ncpl = np.array([nodes], dtype=int) + return UnstructuredGrid(ncpl=ncpl) + + @property + def modelgrid(self): + """Model spatial discretization information. + + Returns + ------- + model grid : Grid subclass + FloPy object containing spatial discretization information for the + model. + + """ + force_resync = False + if not self._mg_resync: + return self._modelgrid + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "delr"): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell2d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + if not hasattr(dis, "_init_complete"): + # disu package has not yet been fully initialized + return self._modelgrid + + # check to see if ncpl can be constructed from ihc array, + # otherwise set ncpl equal to [nodes] + ihc = dis.ihc.array + iac = dis.iac.array + ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) + if ncpl is None: + ncpl = np.array([dis.nodes.get_data()], dtype=int) + cell2d = dis.cell2d.array + idomain = dis.idomain.array + if idomain is None: + idomain = np.ones(dis.nodes.array, dtype=int) + if cell2d is None: + if ( + 
self.simulation.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: cell2d information missing. Functionality of " + "the UnstructuredGrid will be limited." + ) + + vertices = dis.vertices.array + if vertices is None: + if ( + self.simulation.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: vertices information missing. Functionality " + "of the UnstructuredGrid will be limited." + ) + vertices = None + else: + vertices = np.array(vertices) + + self._modelgrid = UnstructuredGrid( + vertices=vertices, + cell2d=cell2d, + top=dis.top.array, + botm=dis.bot.array, + idomain=idomain, + lenuni=dis.length_units.array, + ncpl=ncpl, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + iac=dis.iac.array, + ja=dis.ja.array, + ) + elif self.get_grid_type() == DiscretizationType.DISV1D: + dis = self.get_package("disv1d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell1d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell1d=dis.cell1d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell1d=dis.cell1d.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DIS2D: + dis = self.get_package("dis2d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "delr"): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISV2D: + dis = self.get_package("disv2d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell2d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain 
is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + return self._modelgrid + + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() + + # resolve offsets + if xorig is None: + xorig = self._modelgrid.xoffset + if xorig is None: + if self._xul is not None: + xorig = self._modelgrid._xul_to_xll(self._xul) + else: + xorig = 0.0 + if yorig is None: + yorig = self._modelgrid.yoffset + if yorig is None: + if self._yul is not None: + yorig = self._modelgrid._yul_to_yll(self._yul) + else: + yorig = 0.0 + if angrot is None: + angrot = self._modelgrid.angrot + self._modelgrid.set_coord_info( + xorig, + yorig, + angrot, + self._modelgrid.crs, + ) + self._mg_resync = not self._modelgrid.is_complete or force_resync + return self._modelgrid + + @property + def packagelist(self): + """List of model packages.""" + return self._package_container.packagelist + + @property + def namefile(self): + """Model namefile object.""" + return self.model_nam_file + + @property + def model_ws(self): + """Model file path.""" + file_mgr = self.simulation_data.mfpath + return file_mgr.get_model_path(self.name) + + @property + def exename(self): + """MODFLOW executable name""" + return self.exe_name + + @property + def version(self): + """Version of MODFLOW""" + return self._version + + @property + def solver_tols(self): + """Returns the solver inner hclose and rclose values. + + Returns + ------- + inner_hclose, rclose : float, float + + """ + ims = self.get_ims_package() + if ims is not None: + rclose = ims.rcloserecord.get_data() + if rclose is not None: + rclose = rclose[0][0] + return ims.inner_hclose.get_data(), rclose + return None + + @property + def laytyp(self): + """Layering type""" + try: + return self.npf.icelltype.array + except AttributeError: + return None + + @property + def hdry(self): + """Dry cell value""" + return -1e30 + + @property + def hnoflo(self): + """No-flow cell value""" + return 1e30 + + @property + def laycbd(self): + """Quasi-3D confining bed. Not supported in MODFLOW-6. + + Returns + ------- + None : None + + """ + return None + + @property + def output(self): + budgetkey = None + if self.model_type == "gwt6": + budgetkey = "MASS BUDGET FOR ENTIRE MODEL" + try: + return MF6Output(self.oc, budgetkey=budgetkey) + except AttributeError: + return MF6Output(self, budgetkey=budgetkey) + + def export(self, f, **kwargs): + """Method to export a model to a shapefile or netcdf file + + Parameters + ---------- + f : str + File name (".nc" for netcdf or ".shp" for shapefile) + or dictionary of .... 
+ **kwargs : keyword arguments + modelgrid: flopy.discretization.Grid + User supplied modelgrid object which will supersede the built + in modelgrid object + if fmt is set to 'vtk', parameters of Vtk initializer + + """ + from ..export import utils + + return utils.model_export(f, self, **kwargs) + + @property + def verbose(self): + """Verbose setting for model operations (True/False)""" + return self._verbose + + @verbose.setter + def verbose(self, verbose): + """Verbose setting for model operations (True/False)""" + self._verbose = verbose + + def check(self, f=None, verbose=True, level=1): + """ + Check model data for common errors. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + + Returns + ------- + success : bool + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.check() + """ + + # check instance for model-level check + chk = mf6check(self, f=f, verbose=verbose, level=level) + + return self._check(chk, level) + + @staticmethod + def load_base( + cls_child, + simulation, + structure, + modelname="NewModel", + model_nam_file="modflowtest.nam", + mtype="gwf", + version="mf6", + exe_name: Union[str, os.PathLike] = "mf6", + strict=True, + model_rel_path=os.curdir, + load_only=None, + ): + """ + Class method that loads an existing model. + + Parameters + ---------- + simulation : MFSimulation + simulation object that this model is a part of + simulation_data : MFSimulationData + simulation data object + structure : MFModelStructure + structure of this type of model + model_name : str + name of the model + model_nam_file : str + relative path to the model name file from model working folder + version : str + version of modflow + exe_name : str or PathLike + model executable name or path + strict : bool + strict mode when loading files + model_rel_path : str + relative path of model folder to simulation folder + load_only : list + list of package abbreviations or package names corresponding to + packages that flopy will load. default is None, which loads all + packages. the discretization packages will load regardless of this + setting. subpackages, like time series and observations, will also + load regardless of this setting. 
example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1']
+
+        Returns
+        -------
+        model : MFModel
+
+        Examples
+        --------
+        """
+        instance = cls_child(
+            simulation,
+            modelname,
+            model_nam_file=model_nam_file,
+            version=version,
+            exe_name=exe_name,
+            add_to_simulation=False,
+            structure=structure,
+            model_rel_path=model_rel_path,
+        )
+
+        # build a case-consistent load_only dictionary for quick lookups
+        load_only = PackageContainer._load_only_dict(load_only)
+
+        # load name file
+        instance.name_file.load(strict)
+
+        # order packages
+        vnum = mfstructure.MFStructure().get_version_string()
+        # FIX: Transport - Priority packages maybe should not be hard coded
+        priority_packages = {
+            f"dis{vnum}": 1,
+            f"disv{vnum}": 1,
+            f"disu{vnum}": 1,
+        }
+        packages_ordered = []
+        package_recarray = instance.simulation_data.mfdata[
+            (modelname, "nam", "packages", "packages")
+        ]
+        if package_recarray.array is None:
+            return instance
+
+        for item in package_recarray.get_data():
+            if item[0] in priority_packages:
+                packages_ordered.insert(0, (item[0], item[1], item[2]))
+            else:
+                packages_ordered.append((item[0], item[1], item[2]))
+
+        # load packages
+        sim_struct = mfstructure.MFStructure().sim_struct
+        instance._ftype_num_dict = {}
+        for ftype, fname, pname in packages_ordered:
+            ftype_orig = ftype
+            ftype = ftype[0:-1].lower()
+            if (
+                ftype in structure.package_struct_objs
+                or ftype in sim_struct.utl_struct_objs
+            ):
+                if (
+                    load_only is not None
+                    and not PackageContainer._in_pkg_list(
+                        priority_packages, ftype_orig, pname
+                    )
+                    and not PackageContainer._in_pkg_list(
+                        load_only, ftype_orig, pname
+                    )
+                ):
+                    if (
+                        simulation.simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        print(f"    skipping package {ftype}...")
+                    continue
+                if model_rel_path and model_rel_path != ".":
+                    # strip off model relative path from the file path
+                    filemgr = simulation.simulation_data.mfpath
+                    fname = filemgr.strip_model_relative_path(modelname, fname)
+                if (
+                    simulation.simulation_data.verbosity_level.value
+                    >= VerbosityLevel.normal.value
+                ):
+                    print(f"    loading package {ftype}...")
+                # load package
+                instance.load_package(ftype, fname, pname, strict, None)
+                sim_data = simulation.simulation_data
+                if ftype == "dis" and not sim_data.max_columns_user_set:
+                    # set column wrap to ncol
+                    dis = instance.get_package("dis", type_only=True)
+                    if dis is not None and hasattr(dis, "ncol"):
+                        sim_data.max_columns_of_data = dis.ncol.get_data()
+                        sim_data.max_columns_user_set = False
+                        sim_data.max_columns_auto_set = True
+        # load referenced packages
+        if modelname in instance.simulation_data.referenced_files:
+            for ref_file in instance.simulation_data.referenced_files[
+                modelname
+            ].values():
+                if (
+                    ref_file.file_type in structure.package_struct_objs
+                    or ref_file.file_type in sim_struct.utl_struct_objs
+                ) and not ref_file.loaded:
+                    instance.load_package(
+                        ref_file.file_type,
+                        ref_file.file_name,
+                        None,
+                        strict,
+                        ref_file.reference_path,
+                    )
+                    ref_file.loaded = True
+
+        # TODO: fix jagged lists where appropriate
+
+        return instance
+
+    def inspect_cells(
+        self,
+        cell_list,
+        stress_period=None,
+        output_file_path=None,
+        inspect_budget=True,
+        inspect_dependent_var=True,
+    ):
+        """
+        Inspect model cells. Returns model data associated with cells.
+
+        Parameters
+        ----------
+        cell_list : list of tuples
+            List of model cells. Each model cell is a tuple of integers.
+            ex: [(1,1,1), (2,4,3)]
+        stress_period : int
+            For transient data only return data from this stress period.
If + not specified or None, all stress period data will be returned. + output_file_path: str + Path to output file that will contain the inspection results + inspect_budget: bool + Inspect budget file + inspect_dependent_var: bool + Inspect head file + Returns + ------- + output : dict + Dictionary containing inspection results + + Examples + -------- + + >>> import flopy + >>> sim = flopy.mf6.MFSimulationBase.load("name", "mf6", "mf6", ".") + >>> model = sim.get_model() + >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)] + >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv") + >>> model.inspect_cells(inspect_list, output_file_path=out_file) + """ + # handle no cell case + if cell_list is None or len(cell_list) == 0: + return None + + output_by_package = {} + # loop through all packages + for pp in self.packagelist: + # call the package's "inspect_cells" method + package_output = pp.inspect_cells(cell_list, stress_period) + if len(package_output) > 0: + output_by_package[f"{pp.package_name} package"] = ( + package_output + ) + # get dependent variable + if inspect_dependent_var: + try: + if self.model_type == "gwf6": + heads = self.output.head() + name = "heads" + elif self.model_type == "gwt6": + heads = self.output.concentration() + name = "concentration" + else: + inspect_dependent_var = False + except Exception: + inspect_dependent_var = False + if inspect_dependent_var and heads is not None: + kstp_kper_lst = heads.get_kstpkper() + data_output = DataSearchOutput((name,)) + data_output.output = True + for kstp_kper in kstp_kper_lst: + if stress_period is not None and stress_period != kstp_kper[1]: + continue + head_array = np.array(heads.get_data(kstpkper=kstp_kper)) + # flatten output data in disv and disu cases + if len(cell_list[0]) == 2: + head_array = head_array[0, :, :] + elif len(cell_list[0]) == 1: + head_array = head_array[0, 0, :] + # find data matches + self.match_array_cells( + cell_list, + head_array.shape, + head_array, + kstp_kper, + data_output, + ) + if len(data_output.data_entries) > 0: + output_by_package[f"{name} output"] = [data_output] + + # get model dimensions + model_shape = self.modelgrid.shape + + # get budgets + if inspect_budget: + try: + bud = self.output.budget() + except Exception: + inspect_budget = False + if inspect_budget and bud is not None: + kstp_kper_lst = bud.get_kstpkper() + rec_names = bud.get_unique_record_names() + budget_matches = [] + for rec_name in rec_names: + # clean up binary string name + string_name = str(rec_name)[3:-1].strip() + data_output = DataSearchOutput((string_name,)) + data_output.output = True + for kstp_kper in kstp_kper_lst: + if ( + stress_period is not None + and stress_period != kstp_kper[1] + ): + continue + budget_array = np.array( + bud.get_data( + kstpkper=kstp_kper, + text=rec_name, + full3D=True, + )[0] + ) + if len(budget_array.shape) == 4: + # get rid of 4th "time" dimension + budget_array = budget_array[0, :, :, :] + # flatten output data in disv and disu cases + if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3: + budget_array = budget_array[0, :, :] + elif ( + len(cell_list[0]) == 1 and len(budget_array.shape) >= 2 + ): + budget_array = budget_array[0, :] + # find data matches + if budget_array.shape != model_shape: + # no support yet for different shaped budgets like + # flow_ja_face + continue + + self.match_array_cells( + cell_list, + budget_array.shape, + budget_array, + kstp_kper, + data_output, + ) + if len(data_output.data_entries) > 0: + budget_matches.append(data_output) + if 
len(budget_matches) > 0: + output_by_package["budget output"] = budget_matches + + if len(output_by_package) > 0 and output_file_path is not None: + with open(output_file_path, "w") as fd: + # write document header + fd.write(f"Inspect cell results for model {self.name}\n") + output = [] + for cell in cell_list: + output.append(" ".join([str(i) for i in cell])) + output = ",".join(output) + fd.write(f"Model cells inspected,{output}\n\n") + + for package_name, matches in output_by_package.items(): + fd.write(f"Results from {package_name}\n") + for search_output in matches: + # write header line with data name + fd.write( + f",Results from " + f"{search_output.path_to_data[-1]}\n" + ) + # write data header + if search_output.transient: + if search_output.output: + fd.write(",stress_period,time_step") + else: + fd.write(",stress_period/key") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + fd.write(",cellid") + h_columns = ",".join(search_output.data_header) + fd.write(f",{h_columns}\n") + else: + fd.write(",cellid,data\n") + # write data found + for index, data_entry in enumerate( + search_output.data_entries + ): + if search_output.transient: + sp = search_output.data_entry_stress_period[ + index + ] + if search_output.output: + fd.write(f",{sp[1]},{sp[0]}") + else: + fd.write(f",{sp}") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + cells = search_output.data_entry_cellids[ + index + ] + output = " ".join([str(i) for i in cells]) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + else: + output = " ".join( + [ + str(i) + for i in search_output.data_entry_ids[ + index + ] + ] + ) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + fd.write("\n") + return output_by_package + + def match_array_cells( + self, cell_list, data_shape, array_data, key, data_output + ): + # loop through list of cells we are searching for + for cell in cell_list: + if len(data_shape) == 3 or data_shape[0] == "nodes": + # data is by cell + if array_data.ndim == 3 and len(cell) == 3: + data_output.data_entries.append( + array_data[cell[0], cell[1], cell[2]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append( + array_data[cell[0], cell[1]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: CellID "{}" not same ' + "number of dimensions as data " + "{}.".format(cell, data_output.path_to_data) + ) + print(warning_str) + elif len(data_shape) == 2: + # get data based on ncpl/lay + if array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append( + array_data[cell[0], cell[1]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif len(data_shape) == 1: + # get data based on nodes + if len(cell) == 1 and array_data.ndim == 1: + 
+
+    def match_array_cells(
+        self, cell_list, data_shape, array_data, key, data_output
+    ):
+        # loop through list of cells we are searching for
+        for cell in cell_list:
+            if len(data_shape) == 3 or data_shape[0] == "nodes":
+                # data is by cell
+                if array_data.ndim == 3 and len(cell) == 3:
+                    data_output.data_entries.append(
+                        array_data[cell[0], cell[1], cell[2]]
+                    )
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+                elif array_data.ndim == 2 and len(cell) == 2:
+                    data_output.data_entries.append(
+                        array_data[cell[0], cell[1]]
+                    )
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+                elif array_data.ndim == 1 and len(cell) == 1:
+                    data_output.data_entries.append(array_data[cell[0]])
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+                else:
+                    if (
+                        self.simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        warning_str = (
+                            'WARNING: CellID "{}" does not have the same '
+                            "number of dimensions as data "
+                            "{}.".format(cell, data_output.path_to_data)
+                        )
+                        print(warning_str)
+            elif len(data_shape) == 2:
+                # get data based on ncpl/lay
+                if array_data.ndim == 2 and len(cell) == 2:
+                    data_output.data_entries.append(
+                        array_data[cell[0], cell[1]]
+                    )
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+                elif array_data.ndim == 1 and len(cell) == 1:
+                    data_output.data_entries.append(array_data[cell[0]])
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+            elif len(data_shape) == 1:
+                # get data based on nodes
+                if len(cell) == 1 and array_data.ndim == 1:
+                    data_output.data_entries.append(array_data[cell[0]])
+                    data_output.data_entry_ids.append(cell)
+                    data_output.data_entry_stress_period.append(key)
+
+    @staticmethod
+    def _format_data_entry(data_entry):
+        output = ""
+        if iterable(data_entry, True):
+            for item in data_entry:
+                if isinstance(item, tuple):
+                    formatted = " ".join([str(i) for i in item])
+                    output = f"{output},{formatted}"
+                else:
+                    output = f"{output},{item}"
+            return f"{output}\n"
+        else:
+            return f",{data_entry}\n"
+
+    def write(self, ext_file_action=ExtFileAction.copy_relative_paths):
+        """
+        Writes out model's package files.
+
+        Parameters
+        ----------
+        ext_file_action : ExtFileAction
+            Defines what to do with external files when the simulation path
+            has changed. Defaults to copy_relative_paths, which copies only
+            files with relative paths, leaving files defined by absolute
+            paths fixed.
+
+        """
+
+        # write name file
+        if (
+            self.simulation_data.verbosity_level.value
+            >= VerbosityLevel.normal.value
+        ):
+            print("    writing model name file...")
+
+        self.name_file.write(ext_file_action=ext_file_action)
+
+        if not self.simulation_data.max_columns_user_set:
+            grid_type = self.get_grid_type()
+            if grid_type == DiscretizationType.DIS:
+                self.simulation_data.max_columns_of_data = self.dis.ncol.get_data()
+                self.simulation_data.max_columns_user_set = False
+                self.simulation_data.max_columns_auto_set = True
+
+        # write packages
+        for pp in self.packagelist:
+            if (
+                self.simulation_data.verbosity_level.value
+                >= VerbosityLevel.normal.value
+            ):
+                print(f"    writing package {pp._get_pname()}...")
+            pp.write(ext_file_action=ext_file_action)
+
+    def get_grid_type(self):
+        """
+        Return the type of grid (discretization) used by this model.
+
+        Returns
+        -------
+        grid type : DiscretizationType
+        """
+        package_recarray = self.name_file.packages
+        structure = mfstructure.MFStructure()
+        if (
+            package_recarray.search_data(
+                f"dis{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DIS
+        elif (
+            package_recarray.search_data(
+                f"disv{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DISV
+        elif (
+            package_recarray.search_data(
+                f"disu{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DISU
+        elif (
+            package_recarray.search_data(
+                f"disv1d{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DISV1D
+        elif (
+            package_recarray.search_data(
+                f"dis2d{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DIS2D
+        elif (
+            package_recarray.search_data(
+                f"disv2d{structure.get_version_string()}", 0
+            )
+            is not None
+        ):
+            return DiscretizationType.DISV2D
+
+        return DiscretizationType.UNDEFINED
+
+    def get_ims_package(self):
+        """Get the IMS package associated with this model.
+
+        Returns
+        -------
+        IMS package : ModflowIms
+        """
+        solution_group = self.simulation.name_file.solutiongroup.get_data(0)
+        for record in solution_group:
+            for name in record.dtype.names:
+                if name == "slntype" or name == "slnfname":
+                    continue
+                if record[name] == self.name:
+                    return self.simulation.get_solution_package(
+                        record.slnfname
+                    )
+        return None
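For context, a minimal sketch of dispatching on the value returned by get_grid_type(); the DiscretizationType import path is an assumption, and model is a previously loaded MFModel:

    # assumed import path for the DiscretizationType enum
    from flopy.mf6.utils.mfenums import DiscretizationType

    grid_type = model.get_grid_type()
    if grid_type == DiscretizationType.DIS:
        print("structured grid:", model.modelgrid.shape)
    elif grid_type == DiscretizationType.DISV:
        print("vertex grid,", model.modelgrid.ncpl, "cells per layer")
    elif grid_type == DiscretizationType.UNDEFINED:
        print("no discretization package found")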
+
+    def get_steadystate_list(self):
+        """Returns a list of stress periods that are steady state.
+
+        Returns
+        -------
+        steady state list : list
+
+        """
+        tdis = self.simulation.get_package("tdis")
+        period_data = tdis.perioddata.get_data()
+        # assume every stress period is steady state until a storage
+        # package indicates otherwise
+        ss_list = [True] * len(period_data)
+
+        storage = self.get_package("sto", type_only=True)
+        if storage is not None:
+            tr_keys = storage.transient.get_keys(True)
+            ss_keys = storage.steady_state.get_keys(True)
+            for key in tr_keys:
+                ss_list[key] = False
+                # subsequent stress periods stay transient until the next
+                # explicit steady-state entry is reached
+                for ss_list_key in range(key + 1, len(ss_list)):
+                    if ss_list_key in ss_keys:
+                        break
+                    ss_list[ss_list_key] = False
+        return ss_list
+
+    def is_valid(self):
+        """
+        Checks the validity of the model and all of its packages
+
+        Returns
+        -------
+        valid : bool
+
+        """
+
+        # valid name file
+        if not self.name_file.is_valid():
+            return False
+
+        # valid packages
+        for pp in self.packagelist:
+            if not pp.is_valid():
+                return False
+
+        # required packages exist
+        for package_struct in self.structure.package_struct_objs.values():
+            if (
+                not package_struct.optional
+                and package_struct.file_type
+                not in self._package_container.package_type_dict
+            ):
+                return False
+
+        return True
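An illustrative use of get_steadystate_list() (a sketch; model is assumed to be a loaded MFModel whose parent simulation includes a TDIS package):

    ss_flags = model.get_steadystate_list()
    for kper, steady in enumerate(ss_flags):
        label = "steady-state" if steady else "transient"
        print(f"stress period {kper}: {label}")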
+
+    def set_model_relative_path(self, model_ws):
+        """
+        Sets the file path to the model folder relative to the simulation
+        folder and updates all model file paths, placing them in the model
+        folder.
+
+        Parameters
+        ----------
+        model_ws : str
+            Model working folder relative to simulation working folder
+
+        """
+        # set all data internal
+        self.set_all_data_internal(False)
+
+        # update path in the file manager
+        file_mgr = self.simulation_data.mfpath
+        file_mgr.set_last_accessed_model_path()
+        path = model_ws
+        file_mgr.model_relative_path[self.name] = path
+
+        if (
+            model_ws
+            and model_ws != "."
+            and self.simulation.name_file is not None
+        ):
+            model_folder_path = file_mgr.get_model_path(self.name)
+            if not os.path.exists(model_folder_path):
+                # make new model folder
+                os.makedirs(model_folder_path)
+            # update model name file location in simulation name file
+            models = self.simulation.name_file.models
+            models_data = models.get_data()
+            for index, entry in enumerate(models_data):
+                old_model_file_name = os.path.split(entry[1])[1]
+                old_model_base_name = os.path.splitext(old_model_file_name)[0]
+                if (
+                    old_model_base_name.lower() == self.name.lower()
+                    or self.name == entry[2]
+                ):
+                    models_data[index][1] = os.path.join(
+                        path, old_model_file_name
+                    )
+                    break
+            models.set_data(models_data)
+
+            if self.name_file is not None:
+                # update listing file location in model name file
+                list_file = self.name_file.list.get_data()
+                if list_file:
+                    # split off the file name only; do not shadow "path",
+                    # which still holds the model relative path and is used
+                    # below when relocating the package files
+                    list_file_name = os.path.split(list_file)[1]
+                    try:
+                        self.name_file.list.set_data(
+                            os.path.join(path, list_file_name)
+                        )
+                    except MFDataException as mfde:
+                        message = (
+                            "Error occurred while setting relative "
+                            'path "{}" in model '
+                            '"{}".'.format(
+                                os.path.join(path, list_file_name), self.name
+                            )
+                        )
+                        raise MFDataException(
+                            mfdata_except=mfde,
+                            model=self.model_name,
+                            package=self.name_file._get_pname(),
+                            message=message,
+                        )
+                # update package file locations in model name file
+                packages = self.name_file.packages
+                packages_data = packages.get_data()
+                if packages_data is not None:
+                    for index, entry in enumerate(packages_data):
+                        # get package object associated with entry
+                        package = None
+                        if len(entry) >= 3:
+                            package = self.get_package(entry[2])
+                        if package is None:
+                            package = self.get_package(entry[0])
+                        if package is not None:
+                            # combine model relative path with package path
+                            packages_data[index][1] = os.path.join(
+                                path, package.filename
+                            )
+                        else:
+                            # package not found, create path based on
+                            # information in name file
+                            old_package_name = os.path.split(entry[1])[-1]
+                            packages_data[index][1] = os.path.join(
+                                path, old_package_name
+                            )
+                    packages.set_data(packages_data)
+                # update files referenced from within packages
+                for package in self.packagelist:
+                    package.set_model_relative_path(model_ws)
+
+    def _remove_package_from_dictionaries(self, package):
+        # remove package from local dictionaries and lists
+        if package.path in self._package_paths:
+            del self._package_paths[package.path]
+        self._package_container.remove_package(package)
+
+    def get_package(self, name=None, type_only=False, name_only=False):
+        """
+        Finds a package by package name, package key, package type, or
+        partial package name. Returns either a single package, a list of
+        packages, or None.
+
+        Parameters
+        ----------
+        name : str
+            Name or type of the package, 'my-riv-1', 'RIV', 'LPF', etc.
+        type_only : bool
+            Search for package by type only
+        name_only : bool
+            Search for package by name only
+
+        Returns
+        -------
+        pp : Package object
+
+        """
+        return self._package_container.get_package(name, type_only, name_only)
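A hedged sketch of restructuring a workspace with set_model_relative_path(); the subfolder name "gwf_model" is illustrative:

    import flopy

    sim = flopy.mf6.MFSimulation.load(sim_ws=".")
    model = sim.get_model()
    # move the model's input files into ./gwf_model and update the
    # simulation and model name files to point at the new locations
    model.set_model_relative_path("gwf_model")
    sim.write_simulation()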
+
+    def remove_package(self, package_name):
+        """
+        Removes package and all child packages from the model.
+        `package_name` can be the package's name, type, or package object to
+        be removed from the model.
+
+        Parameters
+        ----------
+        package_name : str
+            Package name, package type, or package object to be removed from
+            the model.
+
+        """
+        if isinstance(package_name, MFPackage):
+            packages = [package_name]
+        else:
+            packages = self.get_package(package_name)
+            if not isinstance(packages, list) and packages is not None:
+                packages = [packages]
+        if packages is None:
+            return
+        for package in packages:
+            if package.model_or_sim.name != self.name:
+                except_text = (
+                    "Package cannot be removed from model "
+                    f"{self.model_name} since it is not part of it."
+                )
+                raise mfstructure.FlopyException(except_text)
+
+            self._remove_package_from_dictionaries(package)
+
+            try:
+                # remove package from name file
+                package_data = self.name_file.packages.get_data()
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while reading package names "
+                    "from name file in model "
+                    f'"{self.name}"'
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+            try:
+                new_rec_array = None
+                for item in package_data:
+                    filename = os.path.basename(item[1])
+                    if filename != package.filename:
+                        if new_rec_array is None:
+                            new_rec_array = np.rec.array(
+                                [item.tolist()], package_data.dtype
+                            )
+                        else:
+                            new_rec_array = np.hstack((item, new_rec_array))
+            except:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    self.structure.get_model(),
+                    self.structure.get_package(),
+                    self._path,
+                    "building package recarray",
+                    self.structure.name,
+                    inspect.stack()[0][3],
+                    type_,
+                    value_,
+                    traceback_,
+                    None,
+                    self.simulation_data.debug,
+                )
+            try:
+                self.name_file.packages.set_data(new_rec_array)
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while setting package names "
+                    f'from name file in model "{self.name}". Package name '
+                    f"data:\n{new_rec_array}"
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+
+            # build list of child packages
+            child_package_list = []
+            for pkg in self.packagelist:
+                if (
+                    pkg.parent_file is not None
+                    and pkg.parent_file.path == package.path
+                ):
+                    child_package_list.append(pkg)
+            # remove child packages
+            for child_package in child_package_list:
+                self._remove_package_from_dictionaries(child_package)
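The three argument styles remove_package() accepts, shown as a short usage sketch (the package names are hypothetical):

    model.remove_package("ghb-1")   # by user-assigned package name
    model.remove_package("ghb")     # by package type
    sto = model.get_package("sto")  # or by package object
    if sto is not None:
        model.remove_package(sto)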
+
+    def update_package_filename(self, package, new_name):
+        """
+        Updates the filename for a package. For internal flopy use only.
+
+        Parameters
+        ----------
+        package : MFPackage
+            Package object
+        new_name : str
+            New package name
+        """
+        try:
+            # get namefile package data
+            package_data = self.name_file.packages.get_data()
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                "from name file in model "
+                f'"{self.name}".'
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+        try:
+            file_mgr = self.simulation_data.mfpath
+            model_rel_path = file_mgr.model_relative_path[self.name]
+            # update namefile package data with new name
+            new_rec_array = None
+            old_leaf = os.path.split(package.filename)[1]
+            for item in package_data:
+                leaf = os.path.split(item[1])[1]
+                if leaf == old_leaf:
+                    item[1] = os.path.join(model_rel_path, new_name)
+
+                if new_rec_array is None:
+                    new_rec_array = np.rec.array(
+                        [item.tolist()], package_data.dtype
+                    )
+                else:
+                    new_rec_array = np.hstack((item, new_rec_array))
+        except:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.structure.get_model(),
+                self.structure.get_package(),
+                self._path,
+                "updating package filename",
+                self.structure.name,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                None,
+                self.simulation_data.debug,
+            )
+        try:
+            self.name_file.packages.set_data(new_rec_array)
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                f'from name file in model "{self.name}". Package name '
+                f"data:\n{new_rec_array}"
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+
+    def rename_all_packages(self, name):
+        """Renames all package files in the model.
+
+        Parameters
+        ----------
+        name : str
+            Prefix of package names. Package files will be named
+            <name>.<package ext>.
+
+        """
+        nam_filename = f"{name}.nam"
+        self.simulation.rename_model_namefile(self, nam_filename)
+        self.name_file.filename = nam_filename
+        self.model_nam_file = nam_filename
+        package_type_count = {}
+        for package in self.packagelist:
+            if package.package_type not in package_type_count:
+                base_filename, leaf = os.path.split(package.filename)
+                lleaf = leaf.split(".")
+                if len(lleaf) > 1:
+                    # keep existing extension
+                    ext = lleaf[-1]
+                else:
+                    # no extension found, create a new one
+                    ext = package.package_type
+                new_fileleaf = f"{name}.{ext}"
+                if base_filename != "":
+                    package.filename = os.path.join(
+                        base_filename, new_fileleaf
+                    )
+                else:
+                    package.filename = new_fileleaf
+                package_type_count[package.package_type] = 1
+            else:
+                package_type_count[package.package_type] += 1
+                package.filename = "{}_{}.{}".format(
+                    name,
+                    package_type_count[package.package_type],
+                    package.package_type,
+                )
+
+    def set_all_data_external(
+        self,
+        check_data=True,
+        external_data_folder=None,
+        base_name=None,
+        binary=False,
+    ):
+        """Sets the model's list and array data to be stored externally.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        check_data : bool
+            Determines if data error checking is enabled during this
+            process.
+        external_data_folder : str
+            Folder, relative to the simulation path or model relative path,
+            where external data will be stored
+        base_name : str
+            Base file name prefix for all files
+        binary : bool
+            Whether file will be stored as binary
+
+        """
+        for package in self.packagelist:
+            package.set_all_data_external(
+                check_data,
+                external_data_folder,
+                base_name,
+                binary,
+            )
+
+    def set_all_data_internal(self, check_data=True):
+        """Sets the model's list and array data to be stored internally.
+ + Parameters + ---------- + check_data : bool + Determines if data error checking is enabled during this + process. + + """ + for package in self.packagelist: + package.set_all_data_internal(check_data) + + def register_package( + self, + package, + add_to_package_list=True, + set_package_name=True, + set_package_filename=True, + ): + """ + Registers a package with the model. This method is used internally + by FloPy and is not intended for use by the end user. + + Parameters + ---------- + package : MFPackage + Package to register + add_to_package_list : bool + Add package to lookup list + set_package_name : bool + Produce a package name for this package + set_package_filename : bool + Produce a filename for this package + + Returns + ------- + path, package structure : tuple, MFPackageStructure + + """ + package.container_type = [PackageContainerType.model] + if package.parent_file is not None: + path = package.parent_file.path + (package.package_type,) + else: + path = (self.name, package.package_type) + package_struct = self.structure.get_package_struct( + package.package_type + ) + if add_to_package_list and path in self._package_paths: + if ( + package_struct is not None + and not package_struct.multi_package_support + and not isinstance(package.parent_file, MFPackage) + ): + # package of this type already exists, replace it + self.remove_package(package.package_type) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with type {} already exists. " + "Replacing existing package" + ".".format(package.package_type) + ) + elif ( + not set_package_name + and package.package_name + in self._package_container.package_name_dict + ): + # package of this type with this name already + # exists, replace it + self.remove_package( + self._package_container.package_name_dict[ + package.package_name + ] + ) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with name {} already exists. " + "Replacing existing package" + ".".format(package.package_name) + ) + + # make sure path is unique + if path in self._package_paths: + path_iter = datautil.PathIter(path) + for new_path in path_iter: + if new_path not in self._package_paths: + path = new_path + break + self._package_paths[path] = 1 + + if package.package_type.lower() == "nam": + if not package.internal_package: + excpt_str = ( + "Unable to register nam file. Do not create your own nam " + "files. Nam files are automatically created and managed " + "for you by FloPy." 
+ ) + print(excpt_str) + raise FlopyException(excpt_str) + + return path, self.structure.name_file_struct_obj + + package_extension = package.package_type + if set_package_name: + # produce a default package name + if ( + package_struct is not None + and package_struct.multi_package_support + ): + # check for other registered packages of this type + name_iter = datautil.NameIter(package.package_type, False) + for package_name in name_iter: + if ( + package_name + not in self._package_container.package_name_dict + ): + package.package_name = package_name + suffix = package_name.split("_") + if ( + len(suffix) > 1 + and datautil.DatumUtil.is_int(suffix[-1]) + and suffix[-1] != "0" + ): + # update file extension to make unique + package_extension = ( + f"{package_extension}_{suffix[-1]}" + ) + break + else: + package.package_name = package.package_type + + if set_package_filename: + # filename uses model base name + package._filename = f"{self.name}.{package.package_type}" + if ( + package._filename + in self._package_container.package_filename_dict + ): + # auto generate a unique file name and register it + file_name = MFFileMgmt.unique_file_name( + package._filename, + self._package_container.package_filename_dict, + ) + package._filename = file_name + + if add_to_package_list: + self._package_container.add_package(package) + + # add obs file to name file if it does not have a parent + if package.package_type in self.structure.package_struct_objs or ( + package.package_type == "obs" and package.parent_file is None + ): + # update model name file + pkg_type = package.package_type.upper() + if ( + package.package_type != "obs" and + self.structure.package_struct_objs[ + package.package_type + ].read_as_arrays + ): + pkg_type = pkg_type[0:-1] + # Model Assumption - assuming all name files have a package + # recarray + file_mgr = self.simulation_data.mfpath + model_rel_path = file_mgr.model_relative_path[self.name] + if model_rel_path != ".": + package_rel_path = os.path.join( + model_rel_path, package.filename + ) + else: + package_rel_path = package.filename + self.name_file.packages.update_record( + [ + f"{pkg_type}6", + package_rel_path, + package.package_name, + ], + 0, + ) + if package_struct is not None: + return (path, package_struct) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Unable to register unsupported file type {} " + "for model {}.".format(package.package_type, self.name) + ) + return None, None + + def load_package( + self, + ftype, + fname, + pname, + strict, + ref_path, + dict_package_name=None, + parent_package: Optional[MFPackage] = None, + ): + """ + Loads a package from a file. This method is used internally by FloPy + and is not intended for the end user. + + Parameters + ---------- + ftype : str + the file type + fname : str + the name of the file containing the package input + pname : str + the user-defined name for the package + strict : bool + strict mode when loading the file + ref_path : str + path to the file. 
Uses the local path if set to None.
+        dict_package_name : str
+            package name for dictionary lookup
+        parent_package : MFPackage
+            parent package
+
+        Examples
+        --------
+        """
+        if ref_path is not None:
+            fname = os.path.join(ref_path, fname)
+        sim_struct = mfstructure.MFStructure().sim_struct
+        if (
+            ftype in self.structure.package_struct_objs
+            and self.structure.package_struct_objs[ftype].multi_package_support
+        ) or (
+            ftype in sim_struct.utl_struct_objs
+            and sim_struct.utl_struct_objs[ftype].multi_package_support
+        ):
+            # resolve dictionary name for package
+            if dict_package_name is not None:
+                if parent_package is not None:
+                    dict_package_name = f"{parent_package.path[-1]}_{ftype}"
+                else:
+                    # use dict_package_name as the base name; key the
+                    # counter on the dictionary name being generated
+                    if dict_package_name in self._ftype_num_dict:
+                        self._ftype_num_dict[dict_package_name] += 1
+                    else:
+                        self._ftype_num_dict[dict_package_name] = 0
+                    dict_package_name = "{}_{}".format(
+                        dict_package_name,
+                        self._ftype_num_dict[dict_package_name],
+                    )
+            else:
+                # use ftype as the base name
+                if ftype in self._ftype_num_dict:
+                    self._ftype_num_dict[ftype] += 1
+                else:
+                    self._ftype_num_dict[ftype] = 1
+                if pname is not None:
+                    dict_package_name = pname
+                else:
+                    dict_package_name = (
+                        f"{ftype}-{self._ftype_num_dict[ftype]}"
+                    )
+        else:
+            dict_package_name = ftype
+
+        # clean up model type text
+        model_type = self.structure.model_type
+        while datautil.DatumUtil.is_int(model_type[-1]):
+            model_type = model_type[0:-1]
+
+        # create package
+        package_obj = PackageContainer.package_factory(ftype, model_type)
+        package = package_obj(
+            self,
+            filename=fname,
+            pname=dict_package_name,
+            loading_package=True,
+            parent_file=parent_package,
+            _internal_package=True,
+        )
+        try:
+            package.load(strict)
+        except ReadAsArraysException:
+            # create ReadAsArrays package and load it instead
+            package_obj = PackageContainer.package_factory(
+                f"{ftype}a", model_type
+            )
+            package = package_obj(
+                self,
+                filename=fname,
+                pname=dict_package_name,
+                loading_package=True,
+                parent_file=parent_package,
+                _internal_package=True,
+            )
+            package.load(strict)
+
+        # register child package with the model
+        self._package_container.add_package(package)
+        if parent_package is not None:
+            # register child package with the parent package
+            parent_package.add_package(package)
+
+        return package
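A short usage sketch for the plot() helper defined just below (assumes matplotlib is installed, model is a loaded MFModel, and the package list is illustrative):

    # plot the DIS and NPF input arrays for the top model layer
    axes = model.plot(SelPackList=["dis", "npf"], mflay=0)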
+
+    def plot(self, SelPackList=None, **kwargs):
+        """
+        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
+        model input data from a model instance
+
+        Args:
+            model: Flopy model instance
+            SelPackList: (list) list of package names to plot, if None
+                all packages will be plotted
+
+            **kwargs : dict
+                filename_base : str
+                    Base file name that will be used to automatically generate file
+                    names for output image files. Plots will be exported as image
+                    files if file_name_base is not None. (default is None)
+                file_extension : str
+                    Valid matplotlib.pyplot file extension for savefig(). Only used
+                    if filename_base is not None. (default is 'png')
+                mflay : int
+                    MODFLOW zero-based layer number to return. If None, then
+                    all layers will be included. (default is None)
+                kper : int
+                    MODFLOW zero-based stress period number to return.
+                    (default is zero)
+                key : str
+                    MfList dictionary key. (default is None)
+
+        Returns:
+            axes : list
+                Empty list is returned if filename_base is not None. Otherwise
+                a list of matplotlib.pyplot.axis are returned.
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        axes = PlotUtilities._plot_model_helper(
+            self, SelPackList=SelPackList, **kwargs
+        )
+
+        return axes
+
+    @staticmethod
+    def _resolve_idomain(idomain, botm):
+        if idomain is None:
+            if botm is None:
+                return idomain
+            else:
+                return np.ones_like(botm)
+        return idomain
+
+    @staticmethod
+    def netcdf_attrs(mname, mtype, grid_type, mesh=None):
+        """Return a dictionary of dataset (model) scoped attributes.
+
+        Parameters
+        ----------
+        mname : str
+            model name
+        mtype : str
+            model type
+        grid_type : DiscretizationType
+            model grid discretization type
+        mesh : str
+            mesh type if dataset is ugrid compliant
+        """
+        attrs = {
+            "modflow_grid": "",
+            "modflow_model": "",
+        }
+        if grid_type == DiscretizationType.DIS:
+            attrs["modflow_grid"] = "STRUCTURED"
+        elif grid_type == DiscretizationType.DISV:
+            attrs["modflow_grid"] = "VERTEX"
+
+        attrs["modflow_model"] = (
+            f"{mname.upper()}: MODFLOW 6 {mtype.upper()} model"
+        )
+
+        # currently supported mesh type is LAYERED
+        if mesh:
+            attrs["mesh"] = mesh
+
+        return attrs
+
+    def netcdf_info(self, mesh=None):
+        """Return a dictionary of dataset (model) scoped attributes.
+
+        Parameters
+        ----------
+        mesh : str
+            mesh type if dataset is ugrid compliant
+        """
+        attrs = MFModel.netcdf_attrs(
+            self.name,
+            self.model_type,
+            self.get_grid_type(),
+            mesh,
+        )
+
+        res_d = {}
+        res_d["attrs"] = attrs
+        return res_d
diff --git a/flopy/mf6/tmp/ruff/2/mfpackage.py b/flopy/mf6/tmp/ruff/2/mfpackage.py
new file mode 100644
index 0000000000..90931d2b66
--- /dev/null
+++ b/flopy/mf6/tmp/ruff/2/mfpackage.py
@@ -0,0 +1,3720 @@
+import copy
+import datetime
+import errno
+import inspect
+import os
+import sys
+import warnings
+
+import numpy as np
+
+from ..mbase import ModelInterface
+from ..pakbase import PackageInterface
+from ..utils import datautil
+from ..utils.check import mf6check
+from ..version import __version__
+from .coordinates import modeldimensions
+from .data import (
+    mfdata,
+    mfdataarray,
+    mfdatalist,
+    mfdataplist,
+    mfdatascalar,
+    mfstructure,
+)
+from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal
+from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure
+from .mfbase import (
+    ExtFileAction,
+    FlopyException,
+    MFDataException,
+    MFFileMgmt,
+    MFInvalidTransientBlockHeaderException,
+    PackageContainer,
+    PackageContainerType,
+    ReadAsArraysException,
+    VerbosityLevel,
+)
+from .utils.output_util import MF6Output
+
+
+class MFBlockHeader:
+    """
+    Represents the header of a block in an MF6 input file. This class is
+    used internally by FloPy and its direct use by a user of this library
+    is not recommended.
+
+    Parameters
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+
+    Attributes
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+    data_items : list
+        List of MFVariable of the variables contained in this block
+
+    """
+
+    def __init__(
+        self,
+        name,
+        variable_strings,
+        comment,
+        simulation_data=None,
+        path=None,
+        block=None,
+    ):
+        self.name = name
+        self.variable_strings = variable_strings
+        self.block = block
+        if not (
+            (simulation_data is None and path is None)
+            or (simulation_data is not None and path is not None)
+        ):
+            raise FlopyException(
+                "Block header must be initialized with both "
+                "simulation_data and path or with neither."
+ ) + if simulation_data is None: + self.comment = comment + self.simulation_data = None + self.path = path + self.comment_path = None + else: + self.connect_to_dict(simulation_data, path, comment) + # TODO: Get data_items from dictionary + self.data_items = [] + # build block comment paths + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + if isinstance(path, list): + path = tuple(path) + if path is not None: + self.blk_trailing_comment_path = path + ( + name, + "blk_trailing_comment", + ) + self.blk_post_comment_path = path + ( + name, + "blk_post_comment", + ) + if self.blk_trailing_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_trailing_comment_path] = ( + MFComment("", "", simulation_data, 0) + ) + if self.blk_post_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_post_comment_path] = MFComment( + "\n", "", simulation_data, 0 + ) + else: + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + + def __lt__(self, other): + transient_key = self.get_transient_key() + if transient_key is None: + return True + else: + other_key = other.get_transient_key() + if other_key is None: + return False + else: + return transient_key < other_key + + def build_header_variables( + self, + simulation_data, + block_header_structure, + block_path, + data, + dimensions, + ): + """Builds data objects to hold header variables.""" + self.data_items = [] + var_path = block_path + (block_header_structure[0].name,) + + # fix up data + fixed_data = [] + if ( + block_header_structure[0].data_item_structures[0].type + == DatumType.keyword + ): + data_item = block_header_structure[0].data_item_structures[0] + fixed_data.append(data_item.name) + if isinstance(data, tuple): + data = list(data) + if isinstance(data, list): + fixed_data = fixed_data + data + else: + fixed_data.append(data) + if len(fixed_data) > 0: + fixed_data = [tuple(fixed_data)] + # create data object + new_data = self.block.data_factory( + simulation_data, + None, + block_header_structure[0], + True, + var_path, + dimensions, + fixed_data, + ) + + self.add_data_item(new_data, data) + + def add_data_item(self, new_data, data): + """Adds data to the block.""" + self.data_items.append(new_data) + while isinstance(data, list): + if len(data) > 0: + data = data[0] + else: + data = None + if not isinstance(data, tuple): + data = (data,) + self.blk_trailing_comment_path += data + self.blk_post_comment_path += data + + def is_same_header(self, block_header): + """Checks if `block_header` is the same header as this header.""" + if len(self.variable_strings) > 0: + if len(self.variable_strings) != len( + block_header.variable_strings + ): + return False + else: + for sitem, oitem in zip( + self.variable_strings, block_header.variable_strings + ): + if sitem != oitem: + return False + return True + elif ( + len(self.data_items) > 0 and len(block_header.variable_strings) > 0 + ): + typ_obj = ( + self.data_items[0].structure.data_item_structures[0].type_obj + ) + if typ_obj == int or typ_obj == float: + return bool( + self.variable_strings[0] + == block_header.variable_strings[0] + ) + else: + return True + elif len(self.data_items) == len(block_header.variable_strings): + return True + return False + + def get_comment(self): + """Get block header comment""" + if self.simulation_data is None: + return self.comment + else: + return self.simulation_data.mfdata[self.comment_path] + + 
def connect_to_dict(self, simulation_data, path, comment=None):
+        """Add comment to the simulation dictionary"""
+        self.simulation_data = simulation_data
+        self.path = path
+        self.comment_path = path + ("blk_hdr_comment",)
+        if comment is None:
+            simulation_data.mfdata[self.comment_path] = self.comment
+        else:
+            simulation_data.mfdata[self.comment_path] = comment
+        self.comment = None
+
+    def write_header(self, fd):
+        """Writes block header to file object `fd`.
+
+        Parameters
+        ----------
+        fd : file object
+            File object to write block header to.
+
+        """
+        fd.write(f"BEGIN {self.name}")
+        if len(self.data_items) > 0:
+            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
+                one_based = (
+                    self.data_items[0].structure.type == DatumType.integer
+                )
+                entry = self.data_items[0].get_file_entry(
+                    values_only=True, one_based=one_based
+                )
+            else:
+                entry = self.data_items[0].get_file_entry()
+            fd.write(str(entry.rstrip()))
+            if len(self.data_items) > 1:
+                for data_item in self.data_items[1:]:
+                    entry = data_item.get_file_entry(values_only=True)
+                    fd.write(str(entry).rstrip())
+        if self.get_comment().text:
+            fd.write(" ")
+            self.get_comment().write(fd)
+        fd.write("\n")
+
+    def write_footer(self, fd):
+        """Writes block footer to file object `fd`.
+
+        Parameters
+        ----------
+        fd : file object
+            File object to write block footer to.
+
+        """
+        fd.write(f"END {self.name}")
+        if len(self.data_items) > 0:
+            one_based = self.data_items[0].structure.type == DatumType.integer
+            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
+                entry = self.data_items[0].get_file_entry(
+                    values_only=True, one_based=one_based
+                )
+            else:
+                entry = self.data_items[0].get_file_entry()
+            fd.write(str(entry.rstrip()))
+        fd.write("\n")
+
+    def get_transient_key(self, data_path=None):
+        """Get transient key associated with this block header."""
+        transient_key = None
+        for index in range(0, len(self.data_items)):
+            if self.data_items[index].structure.type != DatumType.keyword:
+                if data_path == self.data_items[index].path:
+                    # avoid infinite recursion
+                    return True
+                transient_key = self.data_items[index].get_data()
+                if isinstance(transient_key, np.recarray):
+                    item_struct = self.data_items[index].structure
+                    key_index = item_struct.first_non_keyword_index()
+                    if not (
+                        key_index is not None
+                        and len(transient_key[0]) > key_index
+                    ):
+                        if key_index is None:
+                            raise FlopyException(
+                                "Block header index could "
+                                "not be determined."
+                            )
+                        else:
+                            raise FlopyException(
+                                'Block header index "{}" '
+                                'must be less than "{}"'
+                                ".".format(key_index, len(transient_key[0]))
+                            )
+                    transient_key = transient_key[0][key_index]
+                break
+        return transient_key
+
+
+class MFBlock:
+    """
+    Represents a block in an MF6 input file. This class is used internally
+    by FloPy and its direct use by users of the FloPy library is not
+    recommended.
+ + Parameters + ---------- + simulation_data : MFSimulationData + Data specific to this simulation + dimensions : MFDimensions + Describes model dimensions including model grid and simulation time + structure : MFVariableStructure + Structure describing block + path : tuple + Unique path to block + + Attributes + ---------- + block_headers : MFBlockHeader + Block header text (BEGIN/END), header variables, comments in the + header + structure : MFBlockStructure + Structure describing block + path : tuple + Unique path to block + datasets : OrderDict + Dictionary of dataset objects with keys that are the name of the + dataset + datasets_keyword : dict + Dictionary of dataset objects with keys that are key words to identify + start of dataset + enabled : bool + If block is being used in the simulation + + """ + + def __init__( + self, + simulation_data, + dimensions, + structure, + path, + model_or_sim, + container_package, + ): + self._simulation_data = simulation_data + self._dimensions = dimensions + self._model_or_sim = model_or_sim + self._container_package = container_package + self.block_headers = [ + MFBlockHeader( + structure.name, + [], + MFComment("", path, simulation_data, 0), + simulation_data, + path, + self, + ) + ] + self.structure = structure + self.path = path + self.datasets = {} + self.datasets_keyword = {} + # initially disable if optional + self.enabled = structure.number_non_optional_data() > 0 + self.loaded = False + self.external_file_name = None + self._structure_init() + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + data_str = "" + for dataset in self.datasets.values(): + if formal: + ds_repr = repr(dataset) + if len(ds_repr.strip()) > 0: + data_str = ( + f"{data_str}{dataset.structure.name}\n{dataset!r}\n" + ) + else: + ds_str = str(dataset) + if len(ds_str.strip()) > 0: + data_str = ( + f"{data_str}{dataset.structure.name}\n{dataset!s}\n" + ) + return data_str + + # return an MFScalar, MFList, or MFArray + def data_factory( + self, + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + data=None, + package=None, + ): + """Creates the appropriate data child object derived from MFData.""" + data_type = structure.get_datatype() + # examine the data structure and determine the data type + if ( + data_type == mfstructure.DataType.scalar_keyword + or data_type == mfstructure.DataType.scalar + ): + return mfdatascalar.MFScalar( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + ) + elif ( + data_type == mfstructure.DataType.scalar_keyword_transient + or data_type == mfstructure.DataType.scalar_transient + ): + trans_scalar = mfdatascalar.MFScalarTransient( + sim_data, model_or_sim, structure, enable, path, dimensions + ) + if data is not None: + trans_scalar.set_data(data, key=0) + return trans_scalar + elif data_type == mfstructure.DataType.array: + return mfdataarray.MFArray( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + self, + ) + elif data_type == mfstructure.DataType.array_transient: + trans_array = mfdataarray.MFTransientArray( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + self, + ) + if data is not None: + trans_array.set_data(data, key=0) + return trans_array + elif data_type == mfstructure.DataType.list: + if ( + structure.basic_item + and self._container_package.package_type.lower() != "nam" + and self._simulation_data.use_pandas + ): + 
return mfdataplist.MFPandasList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + else: + return mfdatalist.MFList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + elif data_type == mfstructure.DataType.list_transient: + if structure.basic_item and self._simulation_data.use_pandas: + trans_list = mfdataplist.MFPandasTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + else: + trans_list = mfdatalist.MFTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + trans_list.set_data(data, key=0, autofill=True) + return trans_list + elif data_type == mfstructure.DataType.list_multiple: + mult_list = mfdatalist.MFMultipleList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + mult_list.set_data(data, key=0, autofill=True) + return mult_list + + def _structure_init(self): + # load datasets keywords into dictionary + for dataset_struct in self.structure.data_structures.values(): + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + # load block header data items into dictionary + for dataset in self.structure.block_header_structure: + self._new_dataset(dataset.name, dataset, True, None) + + def set_model_relative_path(self, model_ws): + """Sets `model_ws` as the model path relative to the simulation's + path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. + """ + # update datasets + for key, dataset in self.datasets.items(): + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_name = os.path.split(file_line[0])[1] + file_line[0] = os.path.join(model_ws, old_file_name) + # update block headers + for block_header in self.block_headers: + for dataset in block_header.data_items: + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_path, old_file_name = os.path.split( + file_line[1] + ) + new_file_path = os.path.join( + model_ws, old_file_name + ) + # update transient keys of datasets within the + # block + for key, idataset in self.datasets.items(): + if isinstance(idataset, mfdata.MFTransient): + idataset.update_transient_key( + file_line[1], new_file_path + ) + file_line[1] = os.path.join( + model_ws, old_file_name + ) + + def add_dataset(self, dataset_struct, data, var_path): + """Add data to this block.""" + try: + self.datasets[var_path[-1]] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + var_path, + self._dimensions, + data, + 
self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + + self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] + dtype = dataset_struct.get_datatype() + if ( + dtype == mfstructure.DataType.list_transient + or dtype == mfstructure.DataType.list_multiple + or dtype == mfstructure.DataType.array_transient + ): + # build repeating block header(s) + if isinstance(data, dict): + # Add block headers for each dictionary key + for index in data: + if isinstance(index, tuple): + header_list = list(index) + else: + header_list = [index] + self._build_repeating_header(header_list) + elif isinstance(data, list): + # Add a single block header of value 0 + self._build_repeating_header([0]) + elif ( + dtype != mfstructure.DataType.list_multiple + and data is not None + ): + self._build_repeating_header([[0]]) + + return self.datasets[var_path[-1]] + + def _build_repeating_header(self, header_data): + if self.header_exists(header_data[0]): + return + if ( + len(self.block_headers[-1].data_items) == 1 + and self.block_headers[-1].data_items[0].get_data() is not None + ): + block_header_path = self.path + (len(self.block_headers) + 1,) + block_header = MFBlockHeader( + self.structure.name, + [], + MFComment("", self.path, self._simulation_data, 0), + self._simulation_data, + block_header_path, + self, + ) + self.block_headers.append(block_header) + else: + block_header_path = self.path + (len(self.block_headers),) + + struct = self.structure + last_header = self.block_headers[-1] + try: + last_header.build_header_variables( + self._simulation_data, + struct.block_header_structure, + block_header_path, + header_data, + self._dimensions, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while building" + " block header variables for block " + '"{}"'.format(last_header.name), + ) + + def _new_dataset( + self, key, dataset_struct, block_header=False, initial_val=None + ): + dataset_path = self.path + (key,) + if block_header: + if ( + dataset_struct.type == DatumType.integer + and initial_val is not None + and len(initial_val) >= 1 + and dataset_struct.get_record_size()[0] == 1 + ): + # stress periods are stored 0 based + initial_val = int(initial_val[0]) - 1 + if isinstance(initial_val, list): + initial_val_path = tuple(initial_val) + initial_val = [tuple(initial_val)] + else: + initial_val_path = initial_val + try: + new_data = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + self.block_headers[-1].add_data_item(new_data, initial_val_path) + + else: + try: + self.datasets[key] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + 
) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + + def is_empty(self): + """Returns true if this block is empty.""" + for key, dataset in self.datasets.items(): + try: + has_data = dataset.has_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while verifying" + ' data of dataset "{}" in block ' + '"{}"'.format(dataset.structure.name, self.structure.name), + ) + + if has_data is not None and has_data: + return False + return True + + def load(self, block_header, fd, strict=True): + """Loads block from file object. file object must be advanced to + beginning of block before calling. + + Parameters + ---------- + block_header : MFBlockHeader + Block header for block block being loaded. + fd : file + File descriptor of file being loaded + strict : bool + Enforce strict MODFLOW 6 file format. + """ + # verify number of header variables + if ( + len(block_header.variable_strings) + < self.structure.number_non_optional_block_header_data() + ): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" does not ' + "contain the correct number of " + "variables {}".format(block_header.name, self.path) + ) + print(warning_str) + return + + if self.loaded: + # verify header has not already been loaded + for bh_current in self.block_headers: + if bh_current.is_same_header(block_header): + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block header for block "{}" is ' + "not a unique block header " + "{}".format(block_header.name, self.path) + ) + print(warning_str) + return + + # init + self.enabled = True + if not self.loaded: + self.block_headers = [] + block_header.block = self + self.block_headers.append(block_header) + + # process any header variable + if len(self.structure.block_header_structure) > 0: + dataset = self.structure.block_header_structure[0] + self._new_dataset( + dataset.name, + dataset, + True, + self.block_headers[-1].variable_strings, + ) + + # handle special readasarrays case + if ( + self._container_package.structure.read_as_arrays + or ( + hasattr(self._container_package, "aux") + and self._container_package.aux.structure.layered + ) + ): + # auxiliary variables may appear with aux variable name as keyword + aux_vars = self._container_package.auxiliary.get_data() + if aux_vars is not None: + for var_name in list(aux_vars[0])[1:]: + self.datasets_keyword[(var_name,)] = ( + self._container_package.aux.structure + ) + + comments = [] + + # capture any initial comments + initial_comment = MFComment("", "", 0) + fd_block = fd + line = fd_block.readline() + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(line) + post_data_comments = MFComment("", "", self._simulation_data, 0) + while MFComment.is_comment(line, True): + initial_comment.add_text(line) + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + + # if block not empty + external_file_info 
= None + if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"): + if arr_line[0].lower() == "open/close": + # open block contents from external file + fd_block.readline() + root_path = self._simulation_data.mfpath.get_sim_path() + try: + file_name = os.path.split(arr_line[1])[-1] + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f' opening external file "{file_name}"...' + ) + external_file_info = arr_line + except: + type_, value_, traceback_ = sys.exc_info() + message = f'Error reading external file specified in line "{line}"' + raise MFDataException( + self._container_package.model_name, + self._container_package._get_pname(), + self.path, + "reading external file", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + if len(self.structure.data_structures) <= 1: + # load a single data set + dataset = self.datasets[next(iter(self.datasets))] + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading data {dataset.structure.name}..." + ) + next_line = dataset.load( + line, + fd_block, + self.block_headers[-1], + initial_comment, + external_file_info, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format( + dataset.structure.name, + self.structure.name, + fd_block.name, + ), + ) + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[0]}..." 
+ ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + + if next_line[1] is not None: + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + else: + arr_line = "" + # capture any trailing comments + dataset.post_data_comments = post_data_comments + while arr_line and ( + len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" + ): + next_line[1] = fd_block.readline().strip() + arr_line = datautil.PyListUtil.split_data_line( + next_line[1] + ) + if arr_line and ( + len(next_line[1]) <= 2 + or arr_line[0][:3].upper() != "END" + ): + post_data_comments.add_text(" ".join(arr_line)) + else: + # look for keyword and store line as data or comment + try: + key, results = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + except MFInvalidTransientBlockHeaderException as e: + warning_str = f"WARNING: {e}" + print(warning_str) + self.block_headers.pop() + return + + self._save_comments(arr_line, line, key, comments) + if results[1] is None or results[1][:3].upper() != "END": + # block consists of unordered datasets + # load the data sets out of order based on + # initial constants + line = " " + while line != "": + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line: + # determine if at end of block + if ( + len(arr_line[0]) > 2 + and arr_line[0][:3].upper() == "END" + ): + break + # look for keyword and store line as data o + # r comment + key, result = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + self._save_comments(arr_line, line, key, comments) + if ( + result[1] is not None + and result[1][:3].upper() == "END" + ): + break + else: + # block empty, store empty array in block variables + empty_arr = [] + for ds in self.datasets.values(): + if isinstance(ds, mfdata.MFTransient): + transient_key = block_header.get_transient_key() + ds.set_data(empty_arr, key=transient_key) + self.loaded = True + self.is_valid() + + def _find_data_by_keyword(self, line, fd, initial_comment): + first_key = None + nothing_found = False + next_line = [True, line] + while next_line[0] and not nothing_found: + arr_line = datautil.PyListUtil.split_data_line(next_line[1]) + key = datautil.find_keyword(arr_line, self.datasets_keyword) + if key is not None: + ds_name = self.datasets_keyword[key].name + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading data {ds_name}...") + next_line = self.datasets[ds_name].load( + next_line[1], + fd, + self.block_headers[-1], + initial_comment, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format(ds_name, self.structure.name, fd.name), + ) + + # see if first item's name indicates a reference to + # another package + package_info_list = self._get_package_info( + self.datasets[ds_name] + 
) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[1]}..." + ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package( + pkg, pkg.filename, False + ) + if first_key is None: + first_key = key + nothing_found = False + elif ( + arr_line[0].lower() == "readasarrays" + and self.path[-1].lower() == "options" + and self._container_package.structure.read_as_arrays is False + ): + error_msg = ( + "ERROR: Attempting to read a ReadAsArrays " + "package as a non-ReadAsArrays " + "package {}".format(self.path) + ) + raise ReadAsArraysException(error_msg) + else: + nothing_found = True + + if first_key is None: + # look for recarrays. if there is a lone recarray in this block, + # use it by default + recarrays = self.structure.get_all_recarrays() + if len(recarrays) != 1: + return key, [None, None] + dataset = self.datasets[recarrays[0].name] + ds_result = dataset.load( + line, fd, self.block_headers[-1], initial_comment + ) + + # see if first item's name indicates a reference to another + # package + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading child package {package_info[0]}..." 
+ ) + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + None, + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package(pkg, pkg.filename, False) + + return recarrays[0].keyword, ds_result + else: + return first_key, next_line + + def _get_package_info(self, dataset): + if not dataset.structure.file_data: + return None + for index in range(0, len(dataset.structure.data_item_structures)): + data_item = dataset.structure.data_item_structures[index] + if ( + data_item.type == DatumType.keyword + or data_item.type == DatumType.string + ): + item_name = data_item.name + package_type = item_name[:-1] + model_type = self._model_or_sim.structure.model_type + # not all packages have the same naming convention + # try different naming conventions to find the appropriate + # package + package_types = [ + package_type, + f"{self._container_package.package_type}" + f"{package_type}", + ] + package_type_found = None + for ptype in package_types: + if ( + PackageContainer.package_factory(ptype, model_type) + is not None + ): + package_type_found = ptype + break + if package_type_found is not None: + try: + data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'getting data from "{}" ' + 'in block "{}".'.format( + dataset.structure.name, self.structure.name + ), + ) + package_info_list = [] + if isinstance(data, np.recarray): + for row in data: + self._add_to_info_list( + package_info_list, + row[index], + package_type_found, + ) + else: + self._add_to_info_list( + package_info_list, data, package_type_found + ) + + return package_info_list + return None + + def _add_to_info_list( + self, package_info_list, file_location, package_type_found + ): + file_path, file_name = os.path.split(file_location) + dict_package_name = f"{package_type_found}_{self.path[-2]}" + package_info_list.append( + ( + package_type_found, + file_name, + file_path, + dict_package_name, + ) + ) + + def _save_comments(self, arr_line, line, key, comments): + # FIX: Save these comments somewhere in the data set + if key not in self.datasets_keyword: + if MFComment.is_comment(key, True): + if comments: + comments.append("\n") + comments.append(arr_line) + + def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes block to a file object. + + Parameters + ---------- + fd : file object + File object to write to. 
+ + """ + # never write an empty block + is_empty = self.is_empty() + if ( + is_empty + and self.structure.name.lower() != "exchanges" + and self.structure.name.lower() != "options" + and self.structure.name.lower() != "sources" + and self.structure.name.lower() != "stressperioddata" + ): + return + if self.structure.repeating(): + repeating_datasets = self._find_repeating_datasets() + for repeating_dataset in repeating_datasets: + # resolve any missing block headers + self._add_missing_block_headers(repeating_dataset) + for block_header in sorted(self.block_headers): + # write block + self._write_block(fd, block_header, ext_file_action) + else: + self._write_block(fd, self.block_headers[0], ext_file_action) + + def _add_missing_block_headers(self, repeating_dataset): + key_data_list = repeating_dataset.get_active_key_list() + # assemble a dictionary of data keys and empty keys + key_dict = {} + for key in key_data_list: + key_dict[key[0]] = True + for key, value in repeating_dataset.empty_keys.items(): + if value: + key_dict[key] = True + for key in key_dict.keys(): + has_data = repeating_dataset.has_data(key) + empty_key = ( + key in repeating_dataset.empty_keys + and repeating_dataset.empty_keys[key] + ) + if not self.header_exists(key) and (has_data or empty_key): + self._build_repeating_header([key]) + + def header_exists(self, key, data_path=None): + if not isinstance(key, list): + if key is None: + return + comp_key_list = [key] + else: + comp_key_list = key + for block_header in self.block_headers: + transient_key = block_header.get_transient_key(data_path) + if transient_key is True: + return + for comp_key in comp_key_list: + if transient_key is not None and transient_key == comp_key: + return True + return False + + def set_all_data_external( + self, + base_name, + check_data=True, + external_data_folder=None, + binary=False, + ): + """Sets the block's list and array data to be stored externally, + base_name is external file name's prefix, check_data determines + if data error checking is enabled during this process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + base_name : str + Base file name of external files where data will be written to. + check_data : bool + Whether to do data error checking. 
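
The empty-block rule above (options-like blocks are written even when empty) can be observed from the public side. A small sketch, assuming the `gwf` model from the tutorial setup and the grid-based ghb package created there:

ghb = gwf.get_package("ghbg")
for name, block in ghb.blocks.items():
    # blocks named exchanges/options/sources/stressperioddata are written
    # even when empty; all other empty blocks are skipped
    print(name, "empty:", block.is_empty())
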
+        external_data_folder : str or PathLike
+            Folder where external data will be stored
+        binary : bool
+            Whether file will be stored as binary
+
+        """
+
+        for key, dataset in self.datasets.items():
+            lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance(
+                dataset, mfdataplist.MFPandasList
+            )
+            # process enabled arrays and enabled recarray-type list data
+            if (
+                isinstance(dataset, mfdataarray.MFArray)
+                or (lst_data and dataset.structure.type == DatumType.recarray)
+            ) and dataset.enabled:
+                if not binary or (
+                    lst_data
+                    and (
+                        dataset.data_dimensions.package_dim.boundnames()
+                        or not dataset.structure.basic_item
+                    )
+                ):
+                    # list data with boundnames or non-basic items must be
+                    # stored as text
+                    ext = "txt"
+                    binary = False
+                else:
+                    ext = "bin"
+                file_path = f"{base_name}_{dataset.structure.name}.{ext}"
+                replace_existing_external = False
+                if external_data_folder is not None:
+                    # get simulation root path
+                    root_path = self._simulation_data.mfpath.get_sim_path()
+                    # get model relative path, if it exists
+                    if isinstance(self._model_or_sim, ModelInterface):
+                        name = self._model_or_sim.name
+                        rel_path = (
+                            self._simulation_data.mfpath.model_relative_path[
+                                name
+                            ]
+                        )
+                        if rel_path is not None:
+                            root_path = os.path.join(root_path, rel_path)
+                    full_path = os.path.join(root_path, external_data_folder)
+                    if not os.path.exists(full_path):
+                        # create new external data folder
+                        os.makedirs(full_path)
+                    file_path = os.path.join(external_data_folder, file_path)
+                    replace_existing_external = True
+                dataset.store_as_external_file(
+                    file_path,
+                    replace_existing_external=replace_existing_external,
+                    check_data=check_data,
+                    binary=binary,
+                )
+
+    def set_all_data_internal(self, check_data=True):
+        """Sets the block's list and array data to be stored internally;
+        check_data determines if data error checking is enabled during this
+        process.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        check_data : bool
+            Whether to do data error checking.
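
A hedged sketch of the text/binary fallback above, using the package-level wrapper defined later in this file; assumes the tutorial's `gwf` and its storage package:

sto = gwf.get_package("sto")
# arrays honor binary=True; list data with BOUNDNAMES or non-basic items
# is silently written as text instead (see the fallback above)
sto.set_all_data_external(base_name="uzf01", binary=True)
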
+ + """ + + for key, dataset in self.datasets.items(): + if ( + isinstance(dataset, mfdataarray.MFArray) + or ( + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) + and dataset.structure.type == DatumType.recarray + ) + and dataset.enabled + ): + dataset.store_internal(check_data=check_data) + + def _find_repeating_datasets(self): + repeating_datasets = [] + for key, dataset in self.datasets.items(): + if dataset.repeating: + repeating_datasets.append(dataset) + return repeating_datasets + + def _prepare_external(self, fd, file_name, binary=False): + fd_main = fd + fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) + # resolve full file and folder path + fd_file_path = os.path.join(fd_path, file_name) + fd_folder_path = os.path.split(fd_file_path)[0] + if fd_folder_path != "": + if not os.path.exists(fd_folder_path): + # create new external data folder + os.makedirs(fd_folder_path) + return fd_main, fd_file_path + + def _write_block(self, fd, block_header, ext_file_action): + transient_key = None + basic_list = False + dataset_one = list(self.datasets.values())[0] + if isinstance( + dataset_one, + (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), + ): + basic_list = True + for dataset in self.datasets.values(): + assert isinstance( + dataset, + ( + mfdataplist.MFPandasList, + mfdataplist.MFPandasTransientList, + ), + ) + # write block header + block_header.write_header(fd) + if len(block_header.data_items) > 0: + transient_key = block_header.get_transient_key() + + # gather data sets to write + data_set_output = [] + data_found = False + for key, dataset in self.datasets.items(): + try: + if transient_key is None: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" writing data {dataset.structure.name}..." + ) + if basic_list: + ext_fname = dataset.external_file_name() + if ext_fname is not None: + binary = dataset.binary_ext_data() + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry(fd, fd_main=fd_main) + fd = fd_main + else: + dataset.write_file_entry(fd) + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + else: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + " writing data {} ({}).." 
".".format( + dataset.structure.name, transient_key + ) + ) + if basic_list: + ext_fname = dataset.external_file_name(transient_key) + if ext_fname is not None: + binary = dataset.binary_ext_data(transient_key) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + fd = fd_main + else: + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + ) + else: + if dataset.repeating: + output = dataset.get_file_entry( + transient_key, ext_file_action=ext_file_action + ) + if output is not None: + data_set_output.append(output) + data_found = True + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message=( + "Error occurred while writing data " + f'"{dataset.structure.name}" in block ' + f'"{self.structure.name}" to file "{fd.name}"' + ), + ) + if not data_found: + return + if not basic_list: + # write block header + block_header.write_header(fd) + + if self.external_file_name is not None: + indent_string = self._simulation_data.indent_string + fd.write( + f"{indent_string}open/close " + f'"{self.external_file_name}"\n' + ) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, self.external_file_name + ) + # write data sets + for output in data_set_output: + fd.write(output) + + # write trailing comments + pth = block_header.blk_trailing_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + if self.external_file_name is not None and not basic_list: + # switch back writing to package file + fd.close() + fd = fd_main + + # write block footer + block_header.write_footer(fd) + + # write post block comments + pth = block_header.blk_post_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + # write extra line if comments are off + if not self._simulation_data.comments_on: + fd.write("\n") + + def is_allowed(self): + """Determine if block is valid based on the values of dependent + MODFLOW variables.""" + if self.structure.variable_dependant_path: + # fill in empty part of the path with the current path + if len(self.structure.variable_dependant_path) == 3: + dependant_var_path = ( + self.path[0], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 2: + dependant_var_path = ( + self.path[0], + self.path[1], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 1: + dependant_var_path = ( + self.path[0], + self.path[1], + self.path[2], + ) + self.structure.variable_dependant_path + else: + dependant_var_path = None + + # get dependency + dependant_var = None + mf_data = self._simulation_data.mfdata + if dependant_var_path in mf_data: + dependant_var = mf_data[dependant_var_path] + + # resolve dependency + if self.structure.variable_value_when_active[0] == "Exists": + exists = self.structure.variable_value_when_active[1] + if dependant_var and exists.lower() == "true": + return True + elif not dependant_var and exists.lower() == "false": + return True + else: + return False + elif not dependant_var: + return False + elif 
self.structure.variable_value_when_active[0] == ">": + min_val = self.structure.variable_value_when_active[1] + if dependant_var > float(min_val): + return True + else: + return False + elif self.structure.variable_value_when_active[0] == "<": + max_val = self.structure.variable_value_when_active[1] + if dependant_var < float(max_val): + return True + else: + return False + return True + + def is_valid(self): + """ + Returns true if the block is valid. + """ + # check data sets + for dataset in self.datasets.values(): + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid: + return False + # check variables + for block_header in self.block_headers: + for dataset in block_header.data_items: + # Non-optional datasets must be enabled + if not dataset.structure.optional and not dataset.enabled: + return False + # Enabled blocks must be valid + if dataset.enabled and not dataset.is_valid(): + return False + + +class MFPackage(PackageInterface): + """ + Provides an interface for the user to specify data to build a package. + + Parameters + ---------- + parent : MFModel, MFSimulation, or MFPackage + The parent model, simulation, or package containing this package + package_type : str + String defining the package type + filename : str or PathLike + Name or path of file where this package is stored + quoted_filename : str + Filename with quotes around it when there is a space in the name + pname : str + Package name + loading_package : bool + Whether or not to add this package to the parent container's package + list during initialization + + Attributes + ---------- + blocks : dict + Dictionary of blocks contained in this package by block name + path : tuple + Data dictionary path to this package + structure : PackageStructure + Describes the blocks and data contain in this package + dimensions : PackageDimension + Resolves data dimensions for data within this package + + """ + + def __init__( + self, + parent, + package_type, + filename=None, + pname=None, + loading_package=False, + **kwargs, + ): + parent_file = kwargs.pop("parent_file", None) + if isinstance(parent, MFPackage): + self.model_or_sim = parent.model_or_sim + self.parent_file = parent + elif parent_file is not None: + self.model_or_sim = parent + self.parent_file = parent_file + else: + self.model_or_sim = parent + self.parent_file = None + _internal_package = kwargs.pop("_internal_package", False) + if _internal_package: + self.internal_package = True + else: + self.internal_package = False + self._data_list = [] + self._package_type = package_type + if self.model_or_sim.type == "Model" and package_type.lower() != "nam": + self.model_name = self.model_or_sim.name + else: + self.model_name = None + + # a package must have a dfn_file_name + if not hasattr(self, "dfn_file_name"): + self.dfn_file_name = "" + + if ( + self.model_or_sim.type != "Model" + and self.model_or_sim.type != "Simulation" + ): + message = ( + "Invalid model_or_sim parameter. Expecting either a " + 'model or a simulation. 
Instead type "{}" was ' + "given.".format(type(self.model_or_sim)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self._package_container = PackageContainer( + self.model_or_sim.simulation_data + ) + self.simulation_data = self.model_or_sim.simulation_data + + self.blocks = {} + self.container_type = [] + self.loading_package = loading_package + if pname is not None: + if not isinstance(pname, str): + message = ( + "Invalid pname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(pname)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self.package_name = pname.lower() + else: + self.package_name = None + + if filename is None: + if self.model_or_sim.type == "Simulation": + # filename uses simulation base name + base_name = os.path.basename( + os.path.normpath(self.model_or_sim.name) + ) + self._filename = f"{base_name}.{package_type}" + else: + # filename uses model base name + self._filename = f"{self.model_or_sim.name}.{package_type}" + else: + if not isinstance(filename, (str, os.PathLike)): + message = ( + "Invalid fname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(filename)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + self._filename = datautil.clean_filename( + str(filename).replace("\\", "/") + ) + self.path, self.structure = self.model_or_sim.register_package( + self, not loading_package, pname is None, filename is None + ) + self.dimensions = self.create_package_dimensions() + + if self.path is None: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package type {} failed to register property." + " {}".format(self._package_type, self.path) + ) + if self.parent_file is not None: + self.container_type.append(PackageContainerType.package) + # init variables that may be used later + self.post_block_comments = None + self.last_error = None + self.bc_color = "black" + self.__inattr = False + self._child_package_groups = {} + child_builder_call = kwargs.pop("child_builder_call", None) + if ( + self.parent_file is not None + and child_builder_call is None + and package_type in self.parent_file._child_package_groups + ): + # initialize as part of the parent's child package group + chld_pkg_grp = self.parent_file._child_package_groups[package_type] + chld_pkg_grp.init_package(self, self._filename, False) + + # remove any remaining valid kwargs + key_list = list(kwargs.keys()) + for key in key_list: + if "filerecord" in key and hasattr(self, f"{key}"): + kwargs.pop(f"{key}") + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = ( + f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
+ ) + raise FlopyException(excpt_str) + + def __init_subclass__(cls): + """Register package type""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.model_name, + package=self._get_pname(), + ) + return + + if all( + hasattr(self, attr) for attr in ["model_or_sim", "_package_type"] + ): + if hasattr(self.model_or_sim, "_mg_resync"): + if not self.model_or_sim._mg_resync: + self.model_or_sim._mg_resync = self._mg_resync + + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + @property + def filename(self): + """Package's file name.""" + return self._filename + + @property + def quoted_filename(self): + """Package's file name with quotes if there is a space.""" + if " " in self._filename: + return f'"{self._filename}"' + return self._filename + + @filename.setter + def filename(self, fname): + """Package's file name.""" + if ( + isinstance(self.parent_file, MFPackage) + and self.package_type in self.parent_file._child_package_groups + ): + fname = datautil.clean_filename(fname) + try: + child_pkg_group = self.parent_file._child_package_groups[ + self.structure.file_type + ] + child_pkg_group._update_filename(self._filename, fname) + except Exception: + print( + "WARNING: Unable to update file name for parent" + f"package of {self.package_name}." + ) + if self.model_or_sim is not None and fname is not None: + if self._package_type != "nam": + self.model_or_sim.update_package_filename(self, fname) + self._filename = fname + + @property + def package_type(self): + """String describing type of package""" + return self._package_type + + @property + def name(self): + """Name of package""" + return [self.package_name] + + @name.setter + def name(self, name): + """Name of package""" + self.package_name = name + + @property + def parent(self): + """Parent package""" + return self.model_or_sim + + @parent.setter + def parent(self, parent): + """Parent package""" + assert False, "Do not use this setter to set the parent" + + @property + def plottable(self): + """If package is plottable""" + if self.model_or_sim.type == "Simulation": + return False + else: + return True + + @property + def output(self): + """ + Method to get output associated with a specific package + + Returns + ------- + MF6Output object + """ + return MF6Output(self) + + @property + def data_list(self): + """List of data in this package.""" + # return [data_object, data_object, ...] + return self._data_list + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. 
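
Because `__setattr__` above routes assignments to existing MFData attributes through `set_data`, package data can be updated in place, and the `filename` setter keeps parent file records in sync. A minimal sketch, assuming the tutorial's `gwf`; the replacement file name is invented for illustration:

npf = gwf.get_package("npf")
npf.k = 2.5  # dispatched to MFArray.set_data
print(npf.k.get_data()[0, 0, 0])

ghb = gwf.get_package("ghbg")
ghb.filename = "uzf01_revised.ghbg"  # model name file record is updated
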
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. + type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def add_package(self, package): + pkg_type = package.package_type.lower() + if pkg_type in self._package_container.package_type_dict: + for existing_pkg in self._package_container.package_type_dict[ + pkg_type + ]: + if existing_pkg is package: + # do not add the same package twice + return + self._package_container.add_package(package) + + def _get_aux_data(self, aux_names): + if hasattr(self, "stress_period_data"): + spd = self.stress_period_data.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "packagedata"): + pd = self.packagedata.get_data() + if aux_names[0][1] in pd.dtype.names: + return pd + if hasattr(self, "perioddata"): + spd = self.perioddata.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "aux"): + return self.aux.get_data() + return None + + def _boundnames_active(self): + if hasattr(self, "boundnames"): + if self.boundnames.get_data(): + return True + return False + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Data check, returns True on success. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. 
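
The lookup properties above now emit DeprecationWarning; `get_package` is the supported path. A quick check, assuming any package instance such as the tutorial's `uzf`:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = uzf.package_names
print(caught[-1].category.__name__)  # DeprecationWarning
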
+ """ + + if checktype is None: + checktype = mf6check + # do general checks + chk = super().check(f, verbose, level, checktype) + + # do mf6 specific checks + if hasattr(self, "auxiliary"): + # auxiliary variable check + # check if auxiliary variables are defined + aux_names = self.auxiliary.get_data() + if aux_names is not None and len(aux_names[0]) > 1: + num_aux_names = len(aux_names[0]) - 1 + # check for stress period data + aux_data = self._get_aux_data(aux_names) + if aux_data is not None and len(aux_data) > 0: + # make sure the check object exists + if chk is None: + chk = self._get_check(f, verbose, level, checktype) + if isinstance(aux_data, dict): + aux_datasets = list(aux_data.values()) + else: + aux_datasets = [aux_data] + dataset_type = "unknown" + for dataset in aux_datasets: + if isinstance(dataset, np.recarray): + dataset_type = "recarray" + break + elif isinstance(dataset, np.ndarray): + dataset_type = "ndarray" + break + # if aux data is in a list + if dataset_type == "recarray": + # check for time series data + time_series_name_dict = {} + if hasattr(self, "ts") and hasattr( + self.ts, "time_series_namerecord" + ): + # build dictionary of time series data variables + ts_nr = self.ts.time_series_namerecord.get_data() + if ts_nr is not None: + for item in ts_nr: + if len(item) > 0 and item[0] is not None: + time_series_name_dict[item[0]] = True + # auxiliary variables are last unless boundnames + # defined, then second to last + if self._boundnames_active(): + offset = 1 + else: + offset = 0 + + # loop through stress period datasets with aux data + for data in aux_datasets: + if isinstance(data, np.recarray): + for row in data: + row_size = len(row) + aux_start_loc = ( + row_size - num_aux_names - offset - 1 + ) + # loop through auxiliary variables + for idx, var in enumerate( + list(aux_names[0])[1:] + ): + # get index of current aux variable + data_index = aux_start_loc + idx + # verify auxiliary value is either + # numeric or time series variable + if ( + not datautil.DatumUtil.is_float( + row[data_index] + ) + and row[data_index] + not in time_series_name_dict + ): + desc = ( + f"Invalid non-numeric " + f"value " + f"'{row[data_index]}' " + f"in auxiliary data." + ) + chk._add_to_summary( + "Error", + desc=desc, + package=self.package_name, + ) + # else if stress period data is arrays + elif dataset_type == "ndarray": + # loop through auxiliary stress period datasets + for data in aux_datasets: + # verify auxiliary value is either numeric or time + # array series variable + if isinstance(data, np.ndarray): + val = np.isnan(np.sum(data)) + if val: + desc = ( + "One or more nan values were " + "found in auxiliary data." 
+ ) + chk._add_to_summary( + "Warning", + desc=desc, + package=self.package_name, + ) + return chk + + def _get_nan_exclusion_list(self): + excl_list = [] + if hasattr(self, "stress_period_data"): + spd_struct = self.stress_period_data.structure + for item_struct in spd_struct.data_item_structures: + if item_struct.optional or item_struct.keystring_dict: + excl_list.append(item_struct.name) + return excl_list + + def _get_data_str(self, formal, show_data=True): + data_str = ( + "package_name = {}\nfilename = {}\npackage_type = {}" + "\nmodel_or_simulation_package = {}" + "\n{}_name = {}" + "\n".format( + self._get_pname(), + self._filename, + self.package_type, + self.model_or_sim.type.lower(), + self.model_or_sim.type.lower(), + self.model_or_sim.name, + ) + ) + if self.parent_file is not None and formal: + data_str = ( + f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" + ) + else: + data_str = f"{data_str}\n" + if show_data: + for block in self.blocks.values(): + if formal: + bl_repr = repr(block) + if len(bl_repr.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, repr(block) + ) + ) + else: + bl_str = str(block) + if len(bl_str.strip()) > 0: + data_str = ( + "{}Block {}\n--------------------\n{}" "\n".format( + data_str, block.structure.name, str(block) + ) + ) + return data_str + + def _get_pname(self): + if self.package_name is not None: + return str(self.package_name) + else: + return str(self._filename) + + def _get_block_header_info(self, line, path): + # init + header_variable_strs = [] + arr_clean_line = line.strip().split() + header_comment = MFComment( + "", path + (arr_clean_line[1],), self.simulation_data, 0 + ) + # break header into components + if len(arr_clean_line) < 2: + message = ( + "Block header does not contain a name. 
Name " + 'expected in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "parsing block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + elif len(arr_clean_line) == 2: + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + else: + # process text after block name + comment = False + for entry in arr_clean_line[2:]: + # if start of comment + if MFComment.is_comment(entry.strip()[0]): + comment = True + if comment: + header_comment.text = " ".join( + [header_comment.text, entry] + ) + else: + header_variable_strs.append(entry) + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + + def _update_size_defs(self): + # build temporary data lookup by name + data_lookup = {} + for block in self.blocks.values(): + for dataset in block.datasets.values(): + data_lookup[dataset.structure.name] = dataset + + # loop through all data + for block in self.blocks.values(): + for dataset in block.datasets.values(): + # if data shape is 1-D + if ( + dataset.structure.shape + and len(dataset.structure.shape) == 1 + ): + # if shape name is data in this package + if dataset.structure.shape[0] in data_lookup: + size_def = data_lookup[dataset.structure.shape[0]] + size_def_name = size_def.structure.name + + if isinstance(dataset, mfdata.MFTransient): + # for transient data always use the maximum size + new_size = -1 + for key in dataset.get_active_key_list(): + try: + data = dataset.get_data(key=key[0]) + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + data_len = len(data) + if data_len > new_size: + new_size = data_len + else: + # for all other data set max to size + new_size = -1 + try: + data = dataset.get_data() + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + new_size = len(dataset.get_data()) + + if size_def.get_data() is None: + current_size = -1 + else: + current_size = size_def.get_data() + + if new_size > current_size: + # store current size + size_def.set_data(new_size) + + # informational message to the user + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "INFORMATION: {} in {} changed to {} " + "based on size of {}".format( + size_def_name, + size_def.structure.path[:-1], + new_size, + dataset.structure.name, + ) + ) + + def inspect_cells(self, cell_list, stress_period=None): + """ + Inspect model cells. Returns package data associated with cells. + + Parameters + ---------- + cell_list : list of tuples + List of model cells. Each model cell is a tuple of integers. + ex: [(1,1,1), (2,4,3)] + stress_period : int + For transient data, only return data from this stress period. If + not specified or None, all stress period data will be returned. 
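
`_update_size_defs` above is what keeps size variables (maxbound, nuzfcells, and similar) in step with the data; it runs during write and load when `auto_set_sizes` is on. A short sketch, assuming the tutorial's `sim`:

# enabled by default; disable to keep user-specified size variables as-is
sim.simulation_data.auto_set_sizes = True
sim.write_simulation()  # sizes recomputed, INFORMATION messages on change
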
+ + Returns + ------- + output : array + Array containing inspection results + + """ + data_found = [] + + # loop through blocks + local_index_names = [] + local_index_blocks = [] + local_index_values = [] + local_index_cellids = [] + # loop through blocks in package + for block in self.blocks.values(): + # loop through data in block + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + # handle list data + cellid_column = None + local_index_name = None + # loop through list data column definitions + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if index == 0 and data_item.type == DatumType.integer: + local_index_name = data_item.name + # look for cellid column in list data row + if isinstance(data_item, MFDataItemStructure) and ( + data_item.is_cellid or data_item.possible_cellid + ): + cellid_column = index + break + if cellid_column is not None: + data_output = DataSearchOutput(dataset.path) + local_index_vals = [] + local_index_cells = [] + # get data + if isinstance(dataset, mfdatalist.MFTransientList): + # data may be in multiple transient blocks, get + # data from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get data + main_data = {-1: dataset.get_data()} + + # loop through each dataset + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = value.dtype.names + # loop through list data rows + for line in value: + # loop through list of cells we are searching + # for + for cell in cell_list: + if isinstance( + line[cellid_column], tuple + ) and cellids_equal( + line[cellid_column], cell + ): + # save data found + data_output.data_entries.append(line) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append( + key + ) + if datautil.DatumUtil.is_int(line[0]): + # save index data for further + # processing. 
assuming index is + # always first entry + local_index_vals.append(line[0]) + local_index_cells.append(cell) + + if ( + local_index_name is not None + and len(local_index_vals) > 0 + ): + # capture index lookups for scanning related data + local_index_names.append(local_index_name) + local_index_blocks.append(block.path[-1]) + local_index_values.append(local_index_vals) + local_index_cellids.append(local_index_cells) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + elif isinstance(dataset, mfdataarray.MFArray): + # handle array data + data_shape = copy.deepcopy( + dataset.structure.data_item_structures[0].shape + ) + if dataset.path[-1] == "top": + # top is a special case where the two datasets + # need to be combined to get the correct layer top + model_grid = self.model_or_sim.modelgrid + main_data = {-1: model_grid.top_botm} + data_shape.append("nlay") + else: + if isinstance(dataset, mfdataarray.MFTransientArray): + # data may be in multiple blocks, get data from + # appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get a process data + main_data = {-1: dataset.get_data()} + if main_data is None: + continue + data_output = DataSearchOutput(dataset.path) + # loop through datasets + for key, array_data in main_data.items(): + if array_data is None: + continue + self.model_or_sim.match_array_cells( + cell_list, data_shape, array_data, key, data_output + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + + if len(local_index_names) > 0: + # look for data that shares the index value with data found + # for example a shared well or reach number + for block in self.blocks.values(): + # loop through data + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + data_item = dataset.structure.data_item_structures[0] + data_output = DataSearchOutput(dataset.path) + # loop through previous data found + for ( + local_index_name, + local_index_vals, + cell_ids, + local_block_name, + ) in zip( + local_index_names, + local_index_values, + local_index_cellids, + local_index_blocks, + ): + if local_block_name == block.path[-1]: + continue + if ( + isinstance(data_item, MFDataItemStructure) + and data_item.name == local_index_name + and data_item.type == DatumType.integer + ): + # matching data index type found, get data + if isinstance( + dataset, mfdatalist.MFTransientList + ): + # data may be in multiple blocks, get data + # from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block + main_data = {-1: dataset.get_data()} + # loop through the data + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = ( + value.dtype.names + ) + # loop through each row of data + for line in value: + # loop through the index values we are + # looking for + for index_val, cell_id in zip( + local_index_vals, cell_ids + ): + # try to match index values we are + # looking for to the data + if index_val == line[0]: + # save data found + data_output.data_entries.append( + line + ) + data_output.data_entry_ids.append( + index_val + ) + data_output.data_entry_cellids.append( + cell_id + ) + data_output.data_entry_stress_period.append( + key + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + return 
data_found + + def remove(self): + """Removes this package from the simulation/model it is currently a + part of. + """ + self.model_or_sim.remove_package(self) + + def build_child_packages_container(self, pkg_type, filerecord): + """Builds a container object for any child packages. This method is + only intended for FloPy internal use.""" + # get package class + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + # create child package object + child_pkgs_name = f"utl{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is None and self.model_or_sim.model_type is None: + # simulation level object, try just the package type in the name + child_pkgs_name = f"{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is None: + # see if the package is part of one of the supported model types + for model_type in MFStructure().sim_struct.model_types: + child_pkgs_name = f"{model_type}{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory( + child_pkgs_name, "" + ) + if child_pkgs_obj is not None: + break + child_pkgs = child_pkgs_obj( + self.model_or_sim, self, pkg_type, filerecord, None, package_obj + ) + setattr(self, pkg_type, child_pkgs) + self._child_package_groups[pkg_type] = child_pkgs + + def _get_dfn_name_dict(self): + dfn_name_dict = {} + item_num = 0 + for item in self.structure.dfn_list: + if len(item) > 1: + item_name = item[1].split() + if len(item_name) > 1 and item_name[0] == "name": + dfn_name_dict[item_name[1]] = item_num + item_num += 1 + return dfn_name_dict + + def build_child_package(self, pkg_type, data, parameter_name, filerecord): + """Builds a child package. 
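
A usage sketch for `inspect_cells` above, assuming the tutorial's `gwf`; the `path_to_data` and `data_entries` attribute names are taken from the `DataSearchOutput` objects used in the method and are assumed here:

ghb = gwf.get_package("ghbg")
results = ghb.inspect_cells([(99, 0, 0)])
for out in results:
    print(out.path_to_data, len(out.data_entries))
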
This method is only intended for FloPy + internal use.""" + if not hasattr(self, pkg_type): + self.build_child_packages_container(pkg_type, filerecord) + if data is not None: + package_group = getattr(self, pkg_type) + # build child package file name + child_path = package_group.next_default_file_path() + # create new empty child package + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + package = package_obj( + self, filename=child_path, child_builder_call=True + ) + assert hasattr(package, parameter_name) + + if isinstance(data, dict): + # order data correctly + dfn_name_dict = package._get_dfn_name_dict() + ordered_data_items = [] + for key, value in data.items(): + if key in dfn_name_dict: + ordered_data_items.append( + [dfn_name_dict[key], key, value] + ) + else: + ordered_data_items.append([999999, key, value]) + ordered_data_items = sorted( + ordered_data_items, key=lambda x: x[0] + ) + + # evaluate and add data to package + unused_data = {} + for order, key, value in ordered_data_items: + # if key is an attribute of the child package + if isinstance(key, str) and hasattr(package, key): + # set child package attribute + child_data_attr = getattr(package, key) + if isinstance(child_data_attr, mfdatalist.MFList): + child_data_attr.set_data(value, autofill=True) + elif isinstance(child_data_attr, mfdata.MFData): + child_data_attr.set_data(value) + elif key == "fname" or key == "filename": + child_path = value + package._filename = value + else: + setattr(package, key, value) + else: + unused_data[key] = value + if unused_data: + setattr(package, parameter_name, unused_data) + else: + setattr(package, parameter_name, data) + + # append package to list + package_group.init_package(package, child_path) + return package + + def build_mfdata(self, var_name, data=None): + """Returns the appropriate data type object (mfdatalist, mfdataarray, + or mfdatascalar) given that object the appropriate structure (looked + up based on var_name) and any data supplied. This method is for + internal FloPy library use only. + + Parameters + ---------- + var_name : str + Variable name + + data : many supported types + Data contained in this object + + Returns + ------- + data object : MFData subclass + + """ + if self.loading_package: + data = None + for key, block in self.structure.blocks.items(): + if var_name in block.data_structures: + if block.name not in self.blocks: + self.blocks[block.name] = MFBlock( + self.simulation_data, + self.dimensions, + block, + self.path + (key,), + self.model_or_sim, + self, + ) + dataset_struct = block.data_structures[var_name] + var_path = self.path + (key, var_name) + ds = self.blocks[block.name].add_dataset( + dataset_struct, data, var_path + ) + self._data_list.append(ds) + return ds + + message = 'Unable to find variable "{}" in package ' '"{}".'.format( + var_name, self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "building data objects", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + def set_model_relative_path(self, model_ws): + """Sets the model path relative to the simulation's path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. 
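
`build_child_package` above is what runs when a child-package parameter is passed as a dict; attribute-style `initialize` on the child package group takes the same path. A hedged sketch (the time series values, record names, and file name are invented for illustration):

ghb = gwf.get_package("ghbg")
ghb.ts.initialize(
    filename="uzf01.ghb.ts",
    timeseries=[(0.0, 1.5), (250.0, 1.4), (500.0, 1.2)],
    time_series_namerecord="bheadts",
    interpolation_methodrecord="linearend",
)
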
+ + """ + # update blocks + for key, block in self.blocks.items(): + block.set_model_relative_path(model_ws) + # update sub-packages + for package in self._package_container.packagelist: + package.set_model_relative_path(model_ws) + + def set_all_data_external( + self, + check_data=True, + external_data_folder=None, + base_name=None, + binary=False, + ): + """Sets the package's list and array data to be stored externally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + external_data_folder + Folder where external data will be stored + base_name: str + Base file name prefix for all files + binary: bool + Whether file will be stored as binary + """ + # set blocks + for key, block in self.blocks.items(): + file_name = os.path.split(self.filename)[1] + if base_name is not None: + file_name = f"{base_name}_{file_name}" + block.set_all_data_external( + file_name, + check_data, + external_data_folder, + binary, + ) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_external( + check_data, + external_data_folder, + base_name, + binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the package's list and array data to be stored internally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + + """ + # set blocks + for key, block in self.blocks.items(): + block.set_all_data_internal(check_data) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_internal(check_data) + + def load(self, strict=True): + """Loads the package from file. + + Parameters + ---------- + strict : bool + Enforce strict checking of data. + + Returns + ------- + success : bool + + """ + # open file + try: + fd_input_file = open( + datautil.clean_filename(self.get_file_path()), "r" + ) + except OSError as e: + if e.errno == errno.ENOENT: + message = "File {} of type {} could not be opened.".format( + self.get_file_path(), self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self.package_name, + self.path, + "loading package file", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + try: + self._load_blocks(fd_input_file, strict) + except ReadAsArraysException as err: + fd_input_file.close() + raise ReadAsArraysException(err) + # close file + fd_input_file.close() + + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # return validity of file + return self.is_valid() + + def is_valid(self): + """Returns whether or not this package is valid. 
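
`load` returns the result of `is_valid()`, defined next; on failure, `last_error` records the reason. A minimal sketch, assuming a package instance such as `ghb`:

if not ghb.is_valid():
    print(ghb.last_error)  # e.g. 'Required block "..." not enabled'
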
+ + Returns + ------- + is valid : bool + + """ + # Check blocks + for block in self.blocks.values(): + # Non-optional blocks must be enabled + if ( + block.structure.number_non_optional_data() > 0 + and not block.enabled + and block.is_allowed() + ): + self.last_error = ( + f'Required block "{block.block_header.name}" not enabled' + ) + return False + # Enabled blocks must be valid + if block.enabled and not block.is_valid: + self.last_error = f'Invalid block "{block.block_header.name}"' + return False + + return True + + def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): + # init + self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( + MFComment("", self.path, self.simulation_data) + ) + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + blocks_read = 0 + found_first_block = False + line = " " + while line != "": + line = fd_input_file.readline() + clean_line = line.strip() + # If comment or empty line + if MFComment.is_comment(clean_line, True): + self._store_comment(line, found_first_block) + elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": + # parse block header + try: + block_header_info = self._get_block_header_info( + line, self.path + ) + except MFDataException as mfde: + message = ( + "An error occurred while loading block header " + 'in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "loading block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + mfde, + ) + + # if there is more than one possible block with the same name, + # resolve the correct block to use + block_key = block_header_info.name.lower() + block_num = 1 + possible_key = f"{block_header_info.name.lower()}-{block_num}" + if possible_key in self.blocks: + block_key = possible_key + block_header_name = block_header_info.name.lower() + while ( + block_key in self.blocks + and not self.blocks[block_key].is_allowed() + ): + block_key = f"{block_header_name}-{block_num}" + block_num += 1 + + if block_key not in self.blocks: + # block name not recognized, load block as comments and + # issue a warning + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" is not a valid block ' + "name for file type " + "{}.".format(block_key, self.package_type) + ) + print(warning_str) + self._store_comment(line, found_first_block) + while line != "": + line = fd_input_file.readline() + self._store_comment(line, found_first_block) + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line and ( + len(arr_line[0]) <= 2 + or arr_line[0][:3].upper() == "END" + ): + break + else: + found_first_block = True + skip_block = False + cur_block = self.blocks[block_key] + if cur_block.loaded: + # Only blocks defined as repeating are allowed to have + # multiple entries + header_name = block_header_info.name + if not self.structure.blocks[ + header_name.lower() + ].repeating(): + # warn and skip block + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" has ' + "multiple entries and is not " + "intended to be a repeating " + "block ({} package" + ")".format(header_name, self.package_type) + ) + print(warning_str) + skip_block = True + bhs = cur_block.structure.block_header_structure + bhval = block_header_info.variable_strings + if 
( + len(bhs) > 0 + and len(bhval) > 0 + and bhs[0].name == "iper" + ): + nper = self.simulation_data.mfdata[ + ("tdis", "dimensions", "nper") + ].get_data() + bhval_int = datautil.DatumUtil.is_int(bhval[0]) + if not bhval_int or int(bhval[0]) > nper: + # skip block when block stress period is greater + # than nper + skip_block = True + + if not skip_block: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + f" loading block {cur_block.structure.name}..." + ) + # reset comments + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + cur_block.load( + block_header_info, fd_input_file, strict + ) + + # write post block comment + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + blocks_read += 1 + if blocks_read >= max_blocks: + break + else: + # treat skipped block as if it is all comments + arr_line = datautil.PyListUtil.split_data_line( + clean_line + ) + self.post_block_comments.add_text(str(line), True) + while arr_line and ( + len(line) <= 2 or arr_line[0][:3].upper() != "END" + ): + line = fd_input_file.readline() + arr_line = datautil.PyListUtil.split_data_line( + line.strip() + ) + if arr_line: + self.post_block_comments.add_text( + str(line), True + ) + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + else: + if not ( + len(clean_line) == 0 + or (len(line) > 2 and line[:3].upper() == "END") + ): + # Record file location of beginning of unresolved text + # treat unresolved text as a comment for now + self._store_comment(line, found_first_block) + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes the package to a file. + + Parameters + ---------- + ext_file_action : ExtFileAction + How to handle pathing of external data files. + """ + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # create any folders in path + package_file_path = self.get_file_path() + package_folder = os.path.split(package_file_path)[0] + if package_folder and not os.path.isdir(package_folder): + os.makedirs(os.path.split(package_file_path)[0]) + + # open file + fd = open(package_file_path, "w") + + # write flopy header + if self.simulation_data.write_headers: + dt = datetime.datetime.now() + header = ( + "# File generated by Flopy version {} on {} at {}." + "\n".format( + __version__, + dt.strftime("%m/%d/%Y"), + dt.strftime("%H:%M:%S"), + ) + ) + fd.write(header) + + # write blocks + self._write_blocks(fd, ext_file_action) + + fd.close() + + def create_package_dimensions(self): + """Creates a package dimensions object. For internal FloPy library + use. + + Returns + ------- + package dimensions : PackageDimensions + + """ + model_dims = None + if self.container_type[0] == PackageContainerType.model: + model_dims = [ + modeldimensions.ModelDimensions( + self.path[0], self.simulation_data + ) + ] + else: + # this is a simulation file that does not correspond to a specific + # model. 
figure out which model to use and return a dimensions + # object for that model + if self.dfn_file_name[0:3] == "exg": + exchange_rec_array = self.simulation_data.mfdata[ + ("nam", "exchanges", "exchanges") + ].get_data() + if exchange_rec_array is None: + return None + for exchange in exchange_rec_array: + if exchange[1].lower() == self._filename.lower(): + model_dims = [ + modeldimensions.ModelDimensions( + exchange[2], self.simulation_data + ), + modeldimensions.ModelDimensions( + exchange[3], self.simulation_data + ), + ] + break + elif ( + self.dfn_file_name[4:7] == "gnc" + and self.model_or_sim.type == "Simulation" + ): + # get exchange file name associated with gnc package + if self.parent_file is not None: + exg_file_name = self.parent_file.filename + else: + raise Exception( + "Can not create a simulation-level " + "gnc file without a corresponding " + "exchange file. Exchange file must be " + "created first." + ) + # get models associated with exchange file from sim nam file + try: + exchange_recarray_data = ( + self.model_or_sim.name_file.exchanges.get_data() + ) + except MFDataException as mfde: + message = ( + "An error occurred while retrieving exchange " + "data from the simulation name file. The error " + "occurred while processing gnc file " + f'"{self.filename}".' + ) + raise MFDataException( + mfdata_except=mfde, + package=self._get_pname(), + message=message, + ) + assert exchange_recarray_data is not None + model_1 = None + model_2 = None + for exchange in exchange_recarray_data: + if exchange[1] == exg_file_name: + model_1 = exchange[2] + model_2 = exchange[3] + + # assign models to gnc package + model_dims = [ + modeldimensions.ModelDimensions( + model_1, self.simulation_data + ), + modeldimensions.ModelDimensions( + model_2, self.simulation_data + ), + ] + elif self.parent_file is not None: + model_dims = [] + for md in self.parent_file.dimensions.model_dim: + model_name = md.model_name + model_dims.append( + modeldimensions.ModelDimensions( + model_name, self.simulation_data + ) + ) + else: + model_dims = [ + modeldimensions.ModelDimensions(None, self.simulation_data) + ] + return modeldimensions.PackageDimensions( + model_dims, self.structure, self.path + ) + + def _store_comment(self, line, found_first_block): + # Store comment + if found_first_block: + self.post_block_comments.text += line + else: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].text += line + + def _write_blocks(self, fd, ext_file_action): + # verify that all blocks are valid + if not self.is_valid(): + message = ( + 'Unable to write out model file "{}" due to the ' + "following error: " + "{} ({})".format(self._filename, self.last_error, self.path) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "writing package blocks", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + # write initial comments + pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) + if pkg_hdr_comments_path in self.simulation_data.mfdata: + self.simulation_data.mfdata[ + self.path + ("pkg_hdr_comments",) + ].write(fd, False) + + # loop through blocks + block_num = 1 + for block in self.blocks.values(): + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" writing block {block.structure.name}...") + # write block + block.write(fd, ext_file_action=ext_file_action) + block_num += 1 + + def get_file_path(self): + 
"""Returns the package file's path. + + Returns + ------- + file path : str + """ + if self.path[0] in self.simulation_data.mfpath.model_relative_path: + return os.path.join( + self.simulation_data.mfpath.get_model_path(self.path[0]), + self._filename, + ) + else: + return os.path.join( + self.simulation_data.mfpath.get_sim_path(), self._filename + ) + + def export(self, f, **kwargs): + """ + Method to export a package to netcdf or shapefile based on the + extension of the file name (.shp for shapefile, .nc for netcdf) + + Parameters + ---------- + f : str + Filename + kwargs : keyword arguments + modelgrid : flopy.discretization.Grid instance + User supplied modelgrid which can be used for exporting + in lieu of the modelgrid associated with the model object + + Returns + ------- + None or Netcdf object + + """ + from .. import export + + return export.utils.package_export(f, self, **kwargs) + + def plot(self, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate + file names for output image files. Plots will be exported as + image files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only + used if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is + zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. 
+
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        if not self.plottable:
+            raise TypeError("Simulation level packages are not plottable")
+
+        axes = PlotUtilities._plot_package_helper(self, **kwargs)
+        return axes
+
+    @staticmethod
+    def _add_netcdf_entries(
+        attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1
+    ):
+        if auxiliary:
+            auxnames = auxiliary
+        else:
+            auxnames = []
+
+        def add_entry(tagname, iaux=None, layer=None):
+            key = tagname
+            name = f"{pname}"
+            if iaux is not None:
+                key = f"{key}/{iaux}"
+                name = f"{name}_{auxiliary[iaux]}"
+            else:
+                name = f"{name}_{tagname}"
+            if layer is not None:
+                key = f"{key}/layer{layer}"
+                name = f"{name}_l{layer}"
+
+            a = {}
+            a["varname"] = name.lower()
+            a["attrs"] = {}
+            a["attrs"]["modflow_input"] = (
+                f"{mname}/{pname}/{tagname}"
+            ).upper()
+            if iaux is not None:
+                a["attrs"]["modflow_iaux"] = iaux + 1
+            if layer is not None:
+                a["attrs"]["layer"] = layer
+            attrs[key] = a
+
+        if data_item.layered and mesh == "LAYERED":
+            if data_item.name == "aux" or data_item.name == "auxvar":
+                for n, auxname in enumerate(auxnames):
+                    for lay in range(nlay):
+                        add_entry(data_item.name, n, lay + 1)
+            else:
+                for lay in range(nlay):
+                    add_entry(data_item.name, layer=lay + 1)
+        else:
+            if data_item.name == "aux" or data_item.name == "auxvar":
+                for n, auxname in enumerate(auxnames):
+                    add_entry(data_item.name, iaux=n)
+            else:
+                add_entry(data_item.name)
+
+    @staticmethod
+    def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1):
+        from .data.mfstructure import DfnPackage, MFSimulationStructure
+
+        attrs = {}
+        sim_struct = MFSimulationStructure()
+
+        for package in MFPackage.__subclasses__():
+            sim_struct.process_dfn(DfnPackage(package))
+            p = DfnPackage(package)
+            c, sc = p.dfn_file_name.split(".")[0].split("-")
+            if c == mtype.lower() and sc == ptype.lower():
+                sim_struct.add_package(p, model_file=False)
+                # requested package type found and registered; stop scanning
+                break
+
+        if ptype.lower() in sim_struct.package_struct_objs:
+            pso = sim_struct.package_struct_objs[ptype.lower()]
+            for key, block in pso.blocks.items():
+                if key != "griddata" and key != "period":
+                    continue
+                for d in block.data_structures:
+                    if block.data_structures[d].netcdf:
+                        MFPackage._add_netcdf_entries(
+                            attrs,
+                            mtype,
+                            ptype,
+                            block.data_structures[d],
+                            auxiliary,
+                            mesh,
+                            nlay,
+                        )
+
+        res_d = {}
+        for k in list(attrs):
+            res_d[k] = attrs[k]["attrs"]
+
+        return res_d
+
+    def netcdf_info(self, mesh=None):
+        attrs = {}
+
+        if self.dimensions.get_aux_variables():
+            auxnames = list(self.dimensions.get_aux_variables()[0])
+            if len(auxnames) and auxnames[0] == "auxiliary":
+                auxnames.pop(0)
+        else:
+            auxnames = []
+
+        for key, block in self.blocks.items():
+            if key != "griddata" and key != "period":
+                continue
+            for dataset in block.datasets.values():
+                if isinstance(dataset, mfdataarray.MFArray):
+                    for index, data_item in enumerate(
+                        dataset.structure.data_item_structures
+                    ):
+                        if (
+                            dataset.structure.netcdf
+                            and dataset.has_data()
+                        ):
+                            MFPackage._add_netcdf_entries(
+                                attrs,
+                                self.model_name,
+                                self.package_name,
+                                dataset.structure,
+                                auxnames,
+                                mesh,
+                                self.model_or_sim.modelgrid.nlay,
+                            )
+
+        return attrs
+
+
+class MFChildPackages:
+    """
+    Behind-the-scenes code for creating an interface to access child packages
+    from a parent package. This class is automatically constructed by the
+    FloPy library and is for internal library use only.
+ + Parameters + ---------- + """ + + def __init__( + self, + model_or_sim, + parent, + pkg_type, + filerecord, + package=None, + package_class=None, + ): + self._packages = [] + self._filerecord = filerecord + if package is not None: + self._packages.append(package) + self._model_or_sim = model_or_sim + self._cpparent = parent + self._pkg_type = pkg_type + self._package_class = package_class + + def __init_subclass__(cls): + """Register package""" + super().__init_subclass__() + PackageContainer.packages_by_abbr[cls.package_abbr] = cls + + def __getattr__(self, attr): + if ( + "_packages" in self.__dict__ + and len(self._packages) > 0 + and hasattr(self._packages[0], attr) + ): + item = getattr(self._packages[0], attr) + return item + raise AttributeError(attr) + + def __getitem__(self, k): + if isinstance(k, int): + if k < len(self._packages): + return self._packages[k] + raise ValueError(f"Package index {k} does not exist.") + + def __setattr__(self, key, value): + if ( + key != "_packages" + and key != "_model_or_sim" + and key != "_cpparent" + and key != "_inattr" + and key != "_filerecord" + and key != "_package_class" + and key != "_pkg_type" + ): + if len(self._packages) == 0: + raise Exception( + "No {} package is currently attached to package" + " {}. Use the initialize method to create a(n) " + "{} package before attempting to access its " + "properties.".format( + self._pkg_type, self._cpparent.filename, self._pkg_type + ) + ) + package = self._packages[0] + setattr(package, key, value) + return + super().__setattr__(key, value) + + def __default_file_path_base(self, file_path, suffix=""): + stem = os.path.split(file_path)[1] + stem_lst = stem.split(".") + file_name = ".".join(stem_lst[:-1]) + if len(stem_lst) > 1: + file_ext = stem_lst[-1] + return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" + elif suffix != "": + return f"{stem}.{self._pkg_type}" + else: + return f"{stem}.{suffix}.{self._pkg_type}" + + def __file_path_taken(self, possible_path): + for package in self._packages: + # Do case insensitive compare + if package.filename.lower() == possible_path.lower(): + return True + return False + + def next_default_file_path(self): + possible_path = self.__default_file_path_base(self._cpparent.filename) + suffix = 0 + while self.__file_path_taken(possible_path): + possible_path = self.__default_file_path_base( + self._cpparent.filename, suffix + ) + suffix += 1 + return possible_path + + def init_package(self, package, fname, remove_packages=True): + if remove_packages: + # clear out existing packages + self._remove_packages() + elif fname is not None: + self._remove_packages(fname) + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + # check file record variable + found = False + fr_data = self._filerecord.get_data() + if fr_data is not None: + for line in fr_data: + if line[0] == fname: + found = True + if not found: + # append file record variable + self._filerecord.append_data([(fname,)]) + # add the package to the list + self._packages.append(package) + + def _update_filename(self, old_fname, new_fname): + file_record = self._filerecord.get_data() + new_file_record_data = [] + if file_record is not None: + file_record_data = file_record[0] + for item in file_record_data: + base, fname = os.path.split(item) + if fname.lower() == old_fname.lower(): + if base: + new_file_record_data.append( + (os.path.join(base, new_fname),) + ) + else: + new_file_record_data.append((new_fname,)) + else: + 
new_file_record_data.append((item,)) + else: + new_file_record_data.append((new_fname,)) + self._filerecord.set_data(new_file_record_data) + + def _append_package(self, package, fname, update_frecord=True): + if fname is None: + # build a file name + fname = self.next_default_file_path() + package._filename = fname + + if update_frecord: + # set file record variable + file_record = self._filerecord.get_data() + file_record_data = file_record + new_file_record_data = [] + for item in file_record_data: + new_file_record_data.append((item[0],)) + new_file_record_data.append((fname,)) + self._filerecord.set_data(new_file_record_data) + + for existing_pkg in self._packages: + if existing_pkg is package: + # do not add the same package twice + return + # add the package to the list + self._packages.append(package) + + def _remove_packages(self, fname=None, only_pop_from_list=False): + rp_list = [] + for idx, package in enumerate(self._packages): + if fname is None or package.filename == fname: + if not only_pop_from_list: + self._model_or_sim.remove_package(package) + rp_list.append(idx) + for idx in reversed(rp_list): + self._packages.pop(idx) diff --git a/flopy/mf6/tmp/ruff/mfmodel.py b/flopy/mf6/tmp/ruff/mfmodel.py new file mode 100644 index 0000000000..e27308cb02 --- /dev/null +++ b/flopy/mf6/tmp/ruff/mfmodel.py @@ -0,0 +1,2143 @@ +import inspect +import os +import sys +import warnings +from typing import Optional, Union + +import numpy as np + +from ..discretization.grid import Grid +from ..discretization.modeltime import ModelTime +from ..discretization.structuredgrid import StructuredGrid +from ..discretization.unstructuredgrid import UnstructuredGrid +from ..discretization.vertexgrid import VertexGrid +from ..mbase import ModelInterface +from ..utils import datautil +from ..utils.check import mf6check +from .coordinates import modeldimensions +from .data import mfdata, mfdatalist, mfstructure +from .data.mfdatautil import DataSearchOutput, iterable +from .mfbase import ( + ExtFileAction, + FlopyException, + MFDataException, + MFFileMgmt, + PackageContainer, + PackageContainerType, + ReadAsArraysException, + VerbosityLevel, +) +from .mfpackage import MFPackage +from .utils.mfenums import DiscretizationType +from .utils.output_util import MF6Output + + +class MFModel(ModelInterface): + """ + MODFLOW-6 model base class. Represents a single model in a simulation. 
+
+    Parameters
+    ----------
+    simulation_data : MFSimulationData
+        Simulation data object of the simulation this model will belong to
+    structure : MFModelStructure
+        Structure of this type of model
+    modelname : str
+        Name of the model
+    model_nam_file : str
+        Relative path to the model name file from model working folder
+    version : str
+        Version of MODFLOW
+    exe_name : str
+        Model executable name
+    model_ws : str
+        Model working folder path
+    disfile : str
+        Relative path to dis file from model working folder
+    grid_type : str
+        Type of grid the model will use (structured, unstructured, vertices)
+    verbose : bool
+        Verbose setting for model operations (default False)
+
+    Attributes
+    ----------
+    name : str
+        Name of the model
+    exe_name : str
+        Model executable name
+    packages : dict of MFPackage
+        Dictionary of model packages
+
+    """
+
+    def __init__(
+        self,
+        simulation,
+        model_type="gwf6",
+        modelname="model",
+        model_nam_file=None,
+        version="mf6",
+        exe_name="mf6",
+        add_to_simulation=True,
+        structure=None,
+        model_rel_path=".",
+        verbose=False,
+        **kwargs,
+    ):
+        self._package_container = PackageContainer(simulation.simulation_data)
+        self.simulation = simulation
+        self.simulation_data = simulation.simulation_data
+        self.name = modelname
+        self.name_file = None
+        self._version = version
+        self.model_type = model_type
+        self.type = "Model"
+
+        if model_nam_file is None:
+            model_nam_file = f"{modelname}.nam"
+
+        if add_to_simulation:
+            self.structure = simulation.register_model(
+                self, model_type, modelname, model_nam_file
+            )
+        else:
+            self.structure = structure
+        self.set_model_relative_path(model_rel_path)
+        self.exe_name = exe_name
+        self.dimensions = modeldimensions.ModelDimensions(
+            self.name, self.simulation_data
+        )
+        self.simulation_data.model_dimensions[modelname] = self.dimensions
+        self._ftype_num_dict = {}
+        self._package_paths = {}
+        self._verbose = verbose
+
+        if model_nam_file is None:
+            self.model_nam_file = f"{modelname}.nam"
+        else:
+            self.model_nam_file = model_nam_file
+
+        # check for spatial reference info in kwargs
+        xll = kwargs.pop("xll", None)
+        yll = kwargs.pop("yll", None)
+        self._xul = kwargs.pop("xul", None)
+        self._yul = kwargs.pop("yul", None)
+        rotation = kwargs.pop("rotation", 0.0)
+        crs = kwargs.pop("crs", None)
+        # build model grid object
+        self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation)
+
+        self.start_datetime = None
+        # check for extraneous kwargs
+        if len(kwargs) > 0:
+            kwargs_str = ", ".join(kwargs.keys())
+            excpt_str = f'Extraneous kwargs "{kwargs_str}" provided to MFModel.'
+            raise FlopyException(excpt_str)
+
+        # build model name file
+        # create name file based on model type - support different model types
+        package_obj = PackageContainer.package_factory("nam", model_type[0:3])
+        if not package_obj:
+            excpt_str = f"Name file could not be found for model type {model_type[0:3]}."
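+            # package_factory found no name file package for this model
+            # type, so the model cannot be constructed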
+ raise FlopyException(excpt_str) + + self.name_file = package_obj( + self, + filename=self.model_nam_file, + pname=self.name, + _internal_package=True, + ) + + def __init_subclass__(cls): + """Register model type""" + super().__init_subclass__() + PackageContainer.modflow_models.append(cls) + PackageContainer.models_by_type[cls.model_type] = cls + + def __getattr__(self, item): + """ + __getattr__ - used to allow for getting packages as if they are + attributes + + Parameters + ---------- + item : str + 3 character package name (case insensitive) + + + Returns + ------- + pp : Package object + Package object of type :class:`flopy.pakbase.Package` + + """ + if item == "name_file" or not hasattr(self, "name_file"): + raise AttributeError(item) + + package = self.get_package(item) + if package is not None: + return package + raise AttributeError(item) + + def __setattr__(self, name, value): + if hasattr(self, name) and getattr(self, name) is not None: + attribute = object.__getattribute__(self, name) + if attribute is not None and isinstance(attribute, mfdata.MFData): + try: + if isinstance(attribute, mfdatalist.MFList): + attribute.set_data(value, autofill=True) + else: + attribute.set_data(value) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self.name, + package="", + ) + return + super().__setattr__(name, value) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + file_mgr = self.simulation_data.mfpath + data_str = ( + "name = {}\nmodel_type = {}\nversion = {}\nmodel_" + "relative_path = {}" + "\n\n".format( + self.name, + self.model_type, + self.version, + file_mgr.model_relative_path[self.name], + ) + ) + + for package in self.packagelist: + pk_str = package._get_data_str(formal, False) + if formal: + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + else: + pk_str = package._get_data_str(formal, False) + if len(pk_str.strip()) > 0: + data_str = ( + "{}###################\nPackage {}\n" + "###################\n\n" + "{}\n".format(data_str, package._get_pname(), pk_str) + ) + return data_str + + @property + def package_key_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_dict(self): + """Returns a copy of the package name dictionary. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_names(self): + """Returns a list of package names. + + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. 
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + @property + def nper(self): + """Number of stress periods. + + Returns + ------- + nper : int + Number of stress periods in the simulation. + + """ + try: + return self.simulation.tdis.nper.array + except AttributeError: + return None + + @property + def modeltime(self): + """Model time discretization information. + + Returns + ------- + modeltime : ModelTime + FloPy object containing time discretization information for the + simulation. + + """ + tdis = self.simulation.get_package("tdis", type_only=True) + period_data = tdis.perioddata.get_data() + + # build steady state data + sto = self.get_package("sto", type_only=True) + if sto is None: + steady = np.full((len(period_data["perlen"])), True, dtype=bool) + else: + steady = np.full((len(period_data["perlen"])), False, dtype=bool) + ss_periods = sto.steady_state.get_active_key_dict() + for period, val in ss_periods.items(): + if val: + ss_periods[period] = sto.steady_state.get_data(period) + tr_periods = sto.transient.get_active_key_dict() + for period, val in tr_periods.items(): + if val: + tr_periods[period] = sto.transient.get_data(period) + if ss_periods: + last_ss_value = False + # loop through steady state array + for index, value in enumerate(steady): + # resolve if current index is steady state or transient + if index in ss_periods and ss_periods[index]: + last_ss_value = True + elif index in tr_periods and tr_periods[index]: + last_ss_value = False + if last_ss_value is True: + steady[index] = True + + # build model time + itmuni = tdis.time_units.get_data() + start_date_time = tdis.start_date_time.get_data() + + self._model_time = ModelTime( + perlen=period_data["perlen"], + nstp=period_data["nstp"], + tsmult=period_data["tsmult"], + time_units=itmuni, + start_datetime=start_date_time, + steady_state=steady, + ) + return self._model_time + + @property + def modeldiscrit(self): + """Basic model spatial discretization information. This is used + internally prior to model spatial discretization information being + fully loaded. + + Returns + ------- + model grid : Grid subclass + FloPy object containing basic spatial discretization information + for the model. 
+ + """ + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + return StructuredGrid( + nlay=dis.nlay.get_data(), + nrow=dis.nrow.get_data(), + ncol=dis.ncol.get_data(), + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + return VertexGrid(ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data()) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + nodes = dis.nodes.get_data() + ncpl = np.array([nodes], dtype=int) + return UnstructuredGrid(ncpl=ncpl) + + @property + def modelgrid(self): + """Model spatial discretization information. + + Returns + ------- + model grid : Grid subclass + FloPy object containing spatial discretization information for the + model. + + """ + force_resync = False + if not self._mg_resync: + return self._modelgrid + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package("dis") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "delr"): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package("disv") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell2d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.botm.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=dis.top.array, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package("disu") + if not hasattr(dis, "_init_complete"): + # disu package has not yet been fully initialized + return self._modelgrid + + # check to see if ncpl can be constructed from ihc array, + # otherwise set ncpl equal to [nodes] + ihc = dis.ihc.array + iac = dis.iac.array + ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) + if ncpl is None: + ncpl = np.array([dis.nodes.get_data()], dtype=int) + cell2d = dis.cell2d.array + idomain = dis.idomain.array + if idomain is None: + idomain = np.ones(dis.nodes.array, dtype=int) + if cell2d is None: + if ( + 
self.simulation.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: cell2d information missing. Functionality of " + "the UnstructuredGrid will be limited." + ) + + vertices = dis.vertices.array + if vertices is None: + if ( + self.simulation.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: vertices information missing. Functionality " + "of the UnstructuredGrid will be limited." + ) + vertices = None + else: + vertices = np.array(vertices) + + self._modelgrid = UnstructuredGrid( + vertices=vertices, + cell2d=cell2d, + top=dis.top.array, + botm=dis.bot.array, + idomain=idomain, + lenuni=dis.length_units.array, + ncpl=ncpl, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + iac=dis.iac.array, + ja=dis.ja.array, + ) + elif self.get_grid_type() == DiscretizationType.DISV1D: + dis = self.get_package("disv1d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell1d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell1d=dis.cell1d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell1d=dis.cell1d.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DIS2D: + dis = self.get_package("dis2d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "delr"): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = StructuredGrid( + delc=dis.delc.array, + delr=dis.delr.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + elif self.get_grid_type() == DiscretizationType.DISV2D: + dis = self.get_package("disv2d") + if not hasattr(dis, "_init_complete"): + if not hasattr(dis, "cell2d"): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + botm = dis.bottom.array + idomain = dis.idomain.array + if idomain 
is None: + force_resync = True + idomain = self._resolve_idomain(idomain, botm) + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=botm, + idomain=idomain, + lenuni=dis.length_units.array, + crs=self._modelgrid.crs, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + ) + else: + return self._modelgrid + + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() + + # resolve offsets + if xorig is None: + xorig = self._modelgrid.xoffset + if xorig is None: + if self._xul is not None: + xorig = self._modelgrid._xul_to_xll(self._xul) + else: + xorig = 0.0 + if yorig is None: + yorig = self._modelgrid.yoffset + if yorig is None: + if self._yul is not None: + yorig = self._modelgrid._yul_to_yll(self._yul) + else: + yorig = 0.0 + if angrot is None: + angrot = self._modelgrid.angrot + self._modelgrid.set_coord_info( + xorig, + yorig, + angrot, + self._modelgrid.crs, + ) + self._mg_resync = not self._modelgrid.is_complete or force_resync + return self._modelgrid + + @property + def packagelist(self): + """List of model packages.""" + return self._package_container.packagelist + + @property + def namefile(self): + """Model namefile object.""" + return self.model_nam_file + + @property + def model_ws(self): + """Model file path.""" + file_mgr = self.simulation_data.mfpath + return file_mgr.get_model_path(self.name) + + @property + def exename(self): + """MODFLOW executable name""" + return self.exe_name + + @property + def version(self): + """Version of MODFLOW""" + return self._version + + @property + def solver_tols(self): + """Returns the solver inner hclose and rclose values. + + Returns + ------- + inner_hclose, rclose : float, float + + """ + ims = self.get_ims_package() + if ims is not None: + rclose = ims.rcloserecord.get_data() + if rclose is not None: + rclose = rclose[0][0] + return ims.inner_hclose.get_data(), rclose + return None + + @property + def laytyp(self): + """Layering type""" + try: + return self.npf.icelltype.array + except AttributeError: + return None + + @property + def hdry(self): + """Dry cell value""" + return -1e30 + + @property + def hnoflo(self): + """No-flow cell value""" + return 1e30 + + @property + def laycbd(self): + """Quasi-3D confining bed. Not supported in MODFLOW-6. + + Returns + ------- + None : None + + """ + return None + + @property + def output(self): + budgetkey = None + if self.model_type == "gwt6": + budgetkey = "MASS BUDGET FOR ENTIRE MODEL" + try: + return MF6Output(self.oc, budgetkey=budgetkey) + except AttributeError: + return MF6Output(self, budgetkey=budgetkey) + + def export(self, f, **kwargs): + """Method to export a model to a shapefile or netcdf file + + Parameters + ---------- + f : str + File name (".nc" for netcdf or ".shp" for shapefile) + or dictionary of .... 
+        **kwargs : keyword arguments
+            modelgrid: flopy.discretization.Grid
+                User supplied modelgrid object which will supersede the built
+                in modelgrid object
+            if fmt is set to 'vtk', parameters of Vtk initializer
+
+        """
+        from ..export import utils
+
+        return utils.model_export(f, self, **kwargs)
+
+    def netcdf_attrs(self, mesh=None):
+        """Return a dictionary of dataset (model) scoped attributes.
+
+        Parameters
+        ----------
+        mesh : str
+            mesh type if the dataset is ugrid compliant
+        """
+        attrs = {
+            "modflow_grid": "",
+            "modflow_model": "",
+        }
+        if self.get_grid_type() == DiscretizationType.DIS:
+            attrs["modflow_grid"] = "STRUCTURED"
+        elif self.get_grid_type() == DiscretizationType.DISV:
+            attrs["modflow_grid"] = "VERTEX"
+
+        attrs["modflow_model"] = (
+            f"{self.name.upper()}: MODFLOW 6 {self.model_type.upper()[0:3]} model"
+        )
+
+        # supported mesh type: LAYERED
+        if mesh:
+            attrs["mesh"] = mesh
+
+        return attrs
+
+    @property
+    def verbose(self):
+        """Verbose setting for model operations (True/False)"""
+        return self._verbose
+
+    @verbose.setter
+    def verbose(self, verbose):
+        """Verbose setting for model operations (True/False)"""
+        self._verbose = verbose
+
+    def check(self, f=None, verbose=True, level=1):
+        """
+        Check model data for common errors.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        success : bool
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow.load('model.nam')
+        >>> m.check()
+        """
+
+        # check instance for model-level check
+        chk = mf6check(self, f=f, verbose=verbose, level=level)
+
+        return self._check(chk, level)
+
+    @staticmethod
+    def load_base(
+        cls_child,
+        simulation,
+        structure,
+        modelname="NewModel",
+        model_nam_file="modflowtest.nam",
+        mtype="gwf",
+        version="mf6",
+        exe_name: Union[str, os.PathLike] = "mf6",
+        strict=True,
+        model_rel_path=os.curdir,
+        load_only=None,
+    ):
+        """
+        Class method that loads an existing model.
+
+        Parameters
+        ----------
+        simulation : MFSimulation
+            simulation object that this model is a part of
+        simulation_data : MFSimulationData
+            simulation data object
+        structure : MFModelStructure
+            structure of this type of model
+        model_name : str
+            name of the model
+        model_nam_file : str
+            relative path to the model name file from model working folder
+        version : str
+            version of modflow
+        exe_name : str or PathLike
+            model executable name or path
+        strict : bool
+            strict mode when loading files
+        model_rel_path : str
+            relative path of model folder to simulation folder
+        load_only : list
+            list of package abbreviations or package names corresponding to
+            packages that flopy will load. default is None, which loads all
+            packages. the discretization packages will load regardless of this
+            setting. subpackages, like time series and observations, will also
+            load regardless of this setting.
+            example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1']
+
+        Returns
+        -------
+        model : MFModel
+
+        Examples
+        --------
+        """
+        instance = cls_child(
+            simulation,
+            modelname,
+            model_nam_file=model_nam_file,
+            version=version,
+            exe_name=exe_name,
+            add_to_simulation=False,
+            structure=structure,
+            model_rel_path=model_rel_path,
+        )
+
+        # build case consistent load_only dictionary for quick lookups
+        load_only = PackageContainer._load_only_dict(load_only)
+
+        # load name file
+        instance.name_file.load(strict)
+
+        # order packages
+        vnum = mfstructure.MFStructure().get_version_string()
+        # FIX: Transport - Priority packages maybe should not be hard coded
+        priority_packages = {
+            f"dis{vnum}": 1,
+            f"disv{vnum}": 1,
+            f"disu{vnum}": 1,
+        }
+        packages_ordered = []
+        package_recarray = instance.simulation_data.mfdata[
+            (modelname, "nam", "packages", "packages")
+        ]
+        if package_recarray.array is None:
+            return instance
+
+        for item in package_recarray.get_data():
+            if item[0] in priority_packages:
+                packages_ordered.insert(0, (item[0], item[1], item[2]))
+            else:
+                packages_ordered.append((item[0], item[1], item[2]))
+
+        # load packages
+        sim_struct = mfstructure.MFStructure().sim_struct
+        instance._ftype_num_dict = {}
+        for ftype, fname, pname in packages_ordered:
+            ftype_orig = ftype
+            ftype = ftype[0:-1].lower()
+            if (
+                ftype in structure.package_struct_objs
+                or ftype in sim_struct.utl_struct_objs
+            ):
+                if (
+                    load_only is not None
+                    and not PackageContainer._in_pkg_list(
+                        priority_packages, ftype_orig, pname
+                    )
+                    and not PackageContainer._in_pkg_list(load_only, ftype_orig, pname)
+                ):
+                    if (
+                        simulation.simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        print(f"    skipping package {ftype}...")
+                    continue
+                if model_rel_path and model_rel_path != ".":
+                    # strip off model relative path from the file path
+                    filemgr = simulation.simulation_data.mfpath
+                    fname = filemgr.strip_model_relative_path(modelname, fname)
+                if (
+                    simulation.simulation_data.verbosity_level.value
+                    >= VerbosityLevel.normal.value
+                ):
+                    print(f"    loading package {ftype}...")
+                # load package
+                instance.load_package(ftype, fname, pname, strict, None)
+                sim_data = simulation.simulation_data
+                if ftype == "dis" and not sim_data.max_columns_user_set:
+                    # set column wrap to ncol
+                    dis = instance.get_package("dis", type_only=True)
+                    if dis is not None and hasattr(dis, "ncol"):
+                        sim_data.max_columns_of_data = dis.ncol.get_data()
+                        sim_data.max_columns_user_set = False
+                        sim_data.max_columns_auto_set = True
+        # load referenced packages
+        if modelname in instance.simulation_data.referenced_files:
+            for ref_file in instance.simulation_data.referenced_files[
+                modelname
+            ].values():
+                if (
+                    ref_file.file_type in structure.package_struct_objs
+                    or ref_file.file_type in sim_struct.utl_struct_objs
+                ) and not ref_file.loaded:
+                    instance.load_package(
+                        ref_file.file_type,
+                        ref_file.file_name,
+                        None,
+                        strict,
+                        ref_file.reference_path,
+                    )
+                    ref_file.loaded = True
+
+        # TODO: fix jagged lists where appropriate
+
+        return instance
+
+    def inspect_cells(
+        self,
+        cell_list,
+        stress_period=None,
+        output_file_path=None,
+        inspect_budget=True,
+        inspect_dependent_var=True,
+    ):
+        """
+        Inspect model cells. Returns model data associated with cells.
+
+        Parameters
+        ----------
+        cell_list : list of tuples
+            List of model cells. Each model cell is a tuple of integers.
+            ex: [(1,1,1), (2,4,3)]
+        stress_period : int
+            For transient data, only return data from this stress period. 
If + not specified or None, all stress period data will be returned. + output_file_path: str + Path to output file that will contain the inspection results + inspect_budget: bool + Inspect budget file + inspect_dependent_var: bool + Inspect head file + Returns + ------- + output : dict + Dictionary containing inspection results + + Examples + -------- + + >>> import flopy + >>> sim = flopy.mf6.MFSimulationBase.load("name", "mf6", "mf6", ".") + >>> model = sim.get_model() + >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)] + >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv") + >>> model.inspect_cells(inspect_list, output_file_path=out_file) + """ + # handle no cell case + if cell_list is None or len(cell_list) == 0: + return None + + output_by_package = {} + # loop through all packages + for pp in self.packagelist: + # call the package's "inspect_cells" method + package_output = pp.inspect_cells(cell_list, stress_period) + if len(package_output) > 0: + output_by_package[f"{pp.package_name} package"] = package_output + # get dependent variable + if inspect_dependent_var: + try: + if self.model_type == "gwf6": + heads = self.output.head() + name = "heads" + elif self.model_type == "gwt6": + heads = self.output.concentration() + name = "concentration" + else: + inspect_dependent_var = False + except Exception: + inspect_dependent_var = False + if inspect_dependent_var and heads is not None: + kstp_kper_lst = heads.get_kstpkper() + data_output = DataSearchOutput((name,)) + data_output.output = True + for kstp_kper in kstp_kper_lst: + if stress_period is not None and stress_period != kstp_kper[1]: + continue + head_array = np.array(heads.get_data(kstpkper=kstp_kper)) + # flatten output data in disv and disu cases + if len(cell_list[0]) == 2: + head_array = head_array[0, :, :] + elif len(cell_list[0]) == 1: + head_array = head_array[0, 0, :] + # find data matches + self.match_array_cells( + cell_list, + head_array.shape, + head_array, + kstp_kper, + data_output, + ) + if len(data_output.data_entries) > 0: + output_by_package[f"{name} output"] = [data_output] + + # get model dimensions + model_shape = self.modelgrid.shape + + # get budgets + if inspect_budget: + try: + bud = self.output.budget() + except Exception: + inspect_budget = False + if inspect_budget and bud is not None: + kstp_kper_lst = bud.get_kstpkper() + rec_names = bud.get_unique_record_names() + budget_matches = [] + for rec_name in rec_names: + # clean up binary string name + string_name = str(rec_name)[3:-1].strip() + data_output = DataSearchOutput((string_name,)) + data_output.output = True + for kstp_kper in kstp_kper_lst: + if stress_period is not None and stress_period != kstp_kper[1]: + continue + budget_array = np.array( + bud.get_data( + kstpkper=kstp_kper, + text=rec_name, + full3D=True, + )[0] + ) + if len(budget_array.shape) == 4: + # get rid of 4th "time" dimension + budget_array = budget_array[0, :, :, :] + # flatten output data in disv and disu cases + if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3: + budget_array = budget_array[0, :, :] + elif len(cell_list[0]) == 1 and len(budget_array.shape) >= 2: + budget_array = budget_array[0, :] + # find data matches + if budget_array.shape != model_shape: + # no support yet for different shaped budgets like + # flow_ja_face + continue + + self.match_array_cells( + cell_list, + budget_array.shape, + budget_array, + kstp_kper, + data_output, + ) + if len(data_output.data_entries) > 0: + budget_matches.append(data_output) + if len(budget_matches) > 0: 
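+                # at least one budget term matched the requested cells;
+                # record the matches under a dedicated key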
+ output_by_package["budget output"] = budget_matches + + if len(output_by_package) > 0 and output_file_path is not None: + with open(output_file_path, "w") as fd: + # write document header + fd.write(f"Inspect cell results for model {self.name}\n") + output = [] + for cell in cell_list: + output.append(" ".join([str(i) for i in cell])) + output = ",".join(output) + fd.write(f"Model cells inspected,{output}\n\n") + + for package_name, matches in output_by_package.items(): + fd.write(f"Results from {package_name}\n") + for search_output in matches: + # write header line with data name + fd.write(f",Results from {search_output.path_to_data[-1]}\n") + # write data header + if search_output.transient: + if search_output.output: + fd.write(",stress_period,time_step") + else: + fd.write(",stress_period/key") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + fd.write(",cellid") + h_columns = ",".join(search_output.data_header) + fd.write(f",{h_columns}\n") + else: + fd.write(",cellid,data\n") + # write data found + for index, data_entry in enumerate(search_output.data_entries): + if search_output.transient: + sp = search_output.data_entry_stress_period[index] + if search_output.output: + fd.write(f",{sp[1]},{sp[0]}") + else: + fd.write(f",{sp}") + if search_output.data_header is not None: + if len(search_output.data_entry_cellids) > 0: + cells = search_output.data_entry_cellids[index] + output = " ".join([str(i) for i in cells]) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + else: + output = " ".join( + [ + str(i) + for i in search_output.data_entry_ids[index] + ] + ) + fd.write(f",{output}") + fd.write(self._format_data_entry(data_entry)) + fd.write("\n") + return output_by_package + + def match_array_cells(self, cell_list, data_shape, array_data, key, data_output): + # loop through list of cells we are searching for + for cell in cell_list: + if len(data_shape) == 3 or data_shape[0] == "nodes": + # data is by cell + if array_data.ndim == 3 and len(cell) == 3: + data_output.data_entries.append( + array_data[cell[0], cell[1], cell[2]] + ) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append(array_data[cell[0], cell[1]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: CellID "{}" not same ' + "number of dimensions as data " + "{}.".format(cell, data_output.path_to_data) + ) + print(warning_str) + elif len(data_shape) == 2: + # get data based on ncpl/lay + if array_data.ndim == 2 and len(cell) == 2: + data_output.data_entries.append(array_data[cell[0], cell[1]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif array_data.ndim == 1 and len(cell) == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + elif len(data_shape) == 1: + # get data based on nodes + if len(cell) == 1 and array_data.ndim == 1: + data_output.data_entries.append(array_data[cell[0]]) + data_output.data_entry_ids.append(cell) + 
data_output.data_entry_stress_period.append(key) + + @staticmethod + def _format_data_entry(data_entry): + output = "" + if iterable(data_entry, True): + for item in data_entry: + if isinstance(item, tuple): + formatted = " ".join([str(i) for i in item]) + output = f"{output},{formatted}" + else: + output = f"{output},{item}" + return f"{output}\n" + else: + return f",{data_entry}\n" + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """ + Writes out model's package files. + + Parameters + ---------- + ext_file_action : ExtFileAction + Defines what to do with external files when the simulation path has + changed. defaults to copy_relative_paths which copies only files + with relative paths, leaving files defined by absolute paths fixed. + + """ + + # write name file + if self.simulation_data.verbosity_level.value >= VerbosityLevel.normal.value: + print(" writing model name file...") + + self.name_file.write(ext_file_action=ext_file_action) + + if not self.simulation_data.max_columns_user_set: + grid_type = self.get_grid_type() + if grid_type == DiscretizationType.DIS: + self.simulation_data.max_columns_of_data = self.dis.ncol.get_data() + self.simulation_data.max_columns_user_set = False + self.simulation_data.max_columns_auto_set = True + + # write packages + for pp in self.packagelist: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print(f" writing package {pp._get_pname()}...") + pp.write(ext_file_action=ext_file_action) + + def get_grid_type(self): + """ + Return the type of grid used by model 'model_name' in simulation + containing simulation data 'simulation_data'. + + Returns + ------- + grid type : DiscretizationType + """ + package_recarray = self.name_file.packages + structure = mfstructure.MFStructure() + if ( + package_recarray.search_data(f"dis{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DIS + elif ( + package_recarray.search_data(f"disv{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DISV + elif ( + package_recarray.search_data(f"disu{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DISU + elif ( + package_recarray.search_data(f"disv1d{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DISV1D + elif ( + package_recarray.search_data(f"dis2d{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DIS2D + elif ( + package_recarray.search_data(f"disv2d{structure.get_version_string()}", 0) + is not None + ): + return DiscretizationType.DISV2D + + return DiscretizationType.UNDEFINED + + def get_ims_package(self): + """Get the IMS package associated with this model. + + Returns + ------- + IMS package : ModflowIms + """ + solution_group = self.simulation.name_file.solutiongroup.get_data(0) + for record in solution_group: + for name in record.dtype.names: + if name == "slntype" or name == "slnfname": + continue + if record[name] == self.name: + return self.simulation.get_solution_package(record.slnfname) + return None + + def get_steadystate_list(self): + """Returns a list of stress periods that are steady state. 
+ + Returns + ------- + steady state list : list + + """ + ss_list = [] + tdis = self.simulation.get_package("tdis") + period_data = tdis.perioddata.get_data() + index = 0 + pd_len = len(period_data) + while index < pd_len: + ss_list.append(True) + index += 1 + + storage = self.get_package("sto", type_only=True) + if storage is not None: + tr_keys = storage.transient.get_keys(True) + ss_keys = storage.steady_state.get_keys(True) + for key in tr_keys: + ss_list[key] = False + for ss_list_key in range(key + 1, len(ss_list)): + for ss_key in ss_keys: + if ss_key == ss_list_key: + break + ss_list[key] = False + return ss_list + + def is_valid(self): + """ + Checks the validity of the model and all of its packages + + Returns + ------- + valid : bool + + """ + + # valid name file + if not self.name_file.is_valid(): + return False + + # valid packages + for pp in self.packagelist: + if not pp.is_valid(): + return False + + # required packages exist + for package_struct in self.structure.package_struct_objs.values(): + if ( + not package_struct.optional + and package_struct.file_type + not in self._package_container.package_type_dict + ): + return False + + return True + + def set_model_relative_path(self, model_ws): + """ + Sets the file path to the model folder relative to the simulation + folder and updates all model file paths, placing them in the model + folder. + + Parameters + ---------- + model_ws : str + Model working folder relative to simulation working folder + + """ + # set all data internal + self.set_all_data_internal(False) + + # update path in the file manager + file_mgr = self.simulation_data.mfpath + file_mgr.set_last_accessed_model_path() + path = model_ws + file_mgr.model_relative_path[self.name] = path + + if model_ws and model_ws != "." 
and self.simulation.name_file is not None: + model_folder_path = file_mgr.get_model_path(self.name) + if not os.path.exists(model_folder_path): + # make new model folder + os.makedirs(model_folder_path) + # update model name file location in simulation name file + models = self.simulation.name_file.models + models_data = models.get_data() + for index, entry in enumerate(models_data): + old_model_file_name = os.path.split(entry[1])[1] + old_model_base_name = os.path.splitext(old_model_file_name)[0] + if ( + old_model_base_name.lower() == self.name.lower() + or self.name == entry[2] + ): + models_data[index][1] = os.path.join(path, old_model_file_name) + break + models.set_data(models_data) + + if self.name_file is not None: + # update listing file location in model name file + list_file = self.name_file.list.get_data() + if list_file: + path, list_file_name = os.path.split(list_file) + try: + self.name_file.list.set_data(os.path.join(path, list_file_name)) + except MFDataException as mfde: + message = ( + "Error occurred while setting relative " + 'path "{}" in model ' + '"{}".'.format( + os.path.join(path, list_file_name), self.name + ) + ) + raise MFDataException( + mfdata_except=mfde, + model=self.model_name, + package=self.name_file._get_pname(), + message=message, + ) + # update package file locations in model name file + packages = self.name_file.packages + packages_data = packages.get_data() + if packages_data is not None: + for index, entry in enumerate(packages_data): + # get package object associated with entry + package = None + if len(entry) >= 3: + package = self.get_package(entry[2]) + if package is None: + package = self.get_package(entry[0]) + if package is not None: + # combine model relative path with package path + packages_data[index][1] = os.path.join( + path, package.filename + ) + else: + # package not found, create path based on + # information in name file + old_package_name = os.path.split(entry[1])[-1] + packages_data[index][1] = os.path.join( + path, old_package_name + ) + packages.set_data(packages_data) + # update files referenced from within packages + for package in self.packagelist: + package.set_model_relative_path(model_ws) + + def _remove_package_from_dictionaries(self, package): + # remove package from local dictionaries and lists + if package.path in self._package_paths: + del self._package_paths[package.path] + self._package_container.remove_package(package) + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. + type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def remove_package(self, package_name): + """ + Removes package and all child packages from the model. + `package_name` can be the package's name, type, or package object to + be removed from the model. + + Parameters + ---------- + package_name : str + Package name, package type, or package object to be removed from + the model. 
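+
+        Examples
+        --------
+        A minimal sketch; assumes a model object gwf that contains a RIV
+        package (names are illustrative):
+
+        >>> gwf.remove_package("riv")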
+
+        """
+        if isinstance(package_name, MFPackage):
+            packages = [package_name]
+        else:
+            packages = self.get_package(package_name)
+            if not isinstance(packages, list) and packages is not None:
+                packages = [packages]
+        if packages is None:
+            return
+        for package in packages:
+            if package.model_or_sim.name != self.name:
+                except_text = (
+                    "Package cannot be removed from model "
+                    f"{self.model_name} since it is not part of it."
+                )
+                raise mfstructure.FlopyException(except_text)
+
+            self._remove_package_from_dictionaries(package)
+
+            try:
+                # remove package from name file
+                package_data = self.name_file.packages.get_data()
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while reading package names "
+                    "from name file in model "
+                    f'"{self.name}"'
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+            try:
+                new_rec_array = None
+                for item in package_data:
+                    filename = os.path.basename(item[1])
+                    if filename != package.filename:
+                        if new_rec_array is None:
+                            new_rec_array = np.rec.array(
+                                [item.tolist()], package_data.dtype
+                            )
+                        else:
+                            new_rec_array = np.hstack((item, new_rec_array))
+            except:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    self.structure.get_model(),
+                    self.structure.get_package(),
+                    self._path,
+                    "building package recarray",
+                    self.structure.name,
+                    inspect.stack()[0][3],
+                    type_,
+                    value_,
+                    traceback_,
+                    None,
+                    self.simulation_data.debug,
+                )
+            try:
+                self.name_file.packages.set_data(new_rec_array)
+            except MFDataException as mfde:
+                message = (
+                    "Error occurred while setting package names "
+                    f'from name file in model "{self.name}". Package name '
+                    f"data:\n{new_rec_array}"
+                )
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self.model_name,
+                    package=self.name_file._get_pname(),
+                    message=message,
+                )
+
+        # build list of child packages
+        child_package_list = []
+        for pkg in self.packagelist:
+            if pkg.parent_file is not None and pkg.parent_file.path == package.path:
+                child_package_list.append(pkg)
+        # remove child packages
+        for child_package in child_package_list:
+            self._remove_package_from_dictionaries(child_package)
+
+    def update_package_filename(self, package, new_name):
+        """
+        Updates the filename for a package. For internal flopy use only.
+
+        Parameters
+        ----------
+        package : MFPackage
+            Package object
+        new_name : str
+            New package name
+        """
+        try:
+            # get namefile package data
+            package_data = self.name_file.packages.get_data()
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                "from name file in model "
+                f'"{self.name}".'
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+        try:
+            file_mgr = self.simulation_data.mfpath
+            model_rel_path = file_mgr.model_relative_path[self.name]
+            # update namefile package data with new name
+            new_rec_array = None
+            old_leaf = os.path.split(package.filename)[1]
+            for item in package_data:
+                leaf = os.path.split(item[1])[1]
+                if leaf == old_leaf:
+                    item[1] = os.path.join(model_rel_path, new_name)
+
+                if new_rec_array is None:
+                    new_rec_array = np.rec.array([item.tolist()], package_data.dtype)
+                else:
+                    new_rec_array = np.hstack((item, new_rec_array))
+        except:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.structure.get_model(),
+                self.structure.get_package(),
+                self._path,
+                "updating package filename",
+                self.structure.name,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                None,
+                self.simulation_data.debug,
+            )
+        try:
+            self.name_file.packages.set_data(new_rec_array)
+        except MFDataException as mfde:
+            message = (
+                "Error occurred while updating package names "
+                f'from name file in model "{self.name}". Package name '
+                f"data:\n{new_rec_array}"
+            )
+            raise MFDataException(
+                mfdata_except=mfde,
+                model=self.model_name,
+                package=self.name_file._get_pname(),
+                message=message,
+            )
+
+    def rename_all_packages(self, name):
+        """Renames all package files in the model.
+
+        Parameters
+        ----------
+        name : str
+            Prefix of package names. Package files will be named
+            <name>.<package ext>.
+
+        """
+        nam_filename = f"{name}.nam"
+        self.simulation.rename_model_namefile(self, nam_filename)
+        self.name_file.filename = nam_filename
+        self.model_nam_file = nam_filename
+        package_type_count = {}
+        for package in self.packagelist:
+            if package.package_type not in package_type_count:
+                base_filename, leaf = os.path.split(package.filename)
+                lleaf = leaf.split(".")
+                if len(lleaf) > 1:
+                    # keep existing extension
+                    ext = lleaf[-1]
+                else:
+                    # no extension found, create a new one
+                    ext = package.package_type
+                new_fileleaf = f"{name}.{ext}"
+                if base_filename != "":
+                    package.filename = os.path.join(base_filename, new_fileleaf)
+                else:
+                    package.filename = new_fileleaf
+                package_type_count[package.package_type] = 1
+            else:
+                package_type_count[package.package_type] += 1
+                package.filename = "{}_{}.{}".format(
+                    name,
+                    package_type_count[package.package_type],
+                    package.package_type,
+                )
+
+    def set_all_data_external(
+        self,
+        check_data=True,
+        external_data_folder=None,
+        base_name=None,
+        binary=False,
+    ):
+        """Sets the model's list and array data to be stored externally.
+
+        Warning
+        -------
+        The MF6 check mechanism is deprecated pending reimplementation
+        in a future release. While the checks API will remain in place
+        through 3.x, it may be unstable, and will likely change in 4.x.
+
+        Parameters
+        ----------
+        check_data : bool
+            Determines if data error checking is enabled during this
+            process.
+        external_data_folder
+            Folder, relative to the simulation path or model relative path
+            (see use_model_relative_path parameter), where external data
+            will be stored
+        base_name : str
+            Base file name prefix for all files
+        binary : bool
+            Whether file will be stored as binary
+
+        """
+        for package in self.packagelist:
+            package.set_all_data_external(
+                check_data,
+                external_data_folder,
+                base_name,
+                binary,
+            )
+
+    def set_all_data_internal(self, check_data=True):
+        """Sets the model's list and array data to be stored internally.
+ + Parameters + ---------- + check_data : bool + Determines if data error checking is enabled during this + process. + + """ + for package in self.packagelist: + package.set_all_data_internal(check_data) + + def register_package( + self, + package, + add_to_package_list=True, + set_package_name=True, + set_package_filename=True, + ): + """ + Registers a package with the model. This method is used internally + by FloPy and is not intended for use by the end user. + + Parameters + ---------- + package : MFPackage + Package to register + add_to_package_list : bool + Add package to lookup list + set_package_name : bool + Produce a package name for this package + set_package_filename : bool + Produce a filename for this package + + Returns + ------- + path, package structure : tuple, MFPackageStructure + + """ + package.container_type = [PackageContainerType.model] + if package.parent_file is not None: + path = package.parent_file.path + (package.package_type,) + else: + path = (self.name, package.package_type) + package_struct = self.structure.get_package_struct(package.package_type) + if add_to_package_list and path in self._package_paths: + if ( + package_struct is not None + and not package_struct.multi_package_support + and not isinstance(package.parent_file, MFPackage) + ): + # package of this type already exists, replace it + self.remove_package(package.package_type) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with type {} already exists. " + "Replacing existing package" + ".".format(package.package_type) + ) + elif ( + not set_package_name + and package.package_name in self._package_container.package_name_dict + ): + # package of this type with this name already + # exists, replace it + self.remove_package( + self._package_container.package_name_dict[package.package_name] + ) + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package with name {} already exists. " + "Replacing existing package" + ".".format(package.package_name) + ) + + # make sure path is unique + if path in self._package_paths: + path_iter = datautil.PathIter(path) + for new_path in path_iter: + if new_path not in self._package_paths: + path = new_path + break + self._package_paths[path] = 1 + + if package.package_type.lower() == "nam": + if not package.internal_package: + excpt_str = ( + "Unable to register nam file. Do not create your own nam " + "files. Nam files are automatically created and managed " + "for you by FloPy." 
+ ) + print(excpt_str) + raise FlopyException(excpt_str) + + return path, self.structure.name_file_struct_obj + + package_extension = package.package_type + if set_package_name: + # produce a default package name + if package_struct is not None and package_struct.multi_package_support: + # check for other registered packages of this type + name_iter = datautil.NameIter(package.package_type, False) + for package_name in name_iter: + if package_name not in self._package_container.package_name_dict: + package.package_name = package_name + suffix = package_name.split("_") + if ( + len(suffix) > 1 + and datautil.DatumUtil.is_int(suffix[-1]) + and suffix[-1] != "0" + ): + # update file extension to make unique + package_extension = f"{package_extension}_{suffix[-1]}" + break + else: + package.package_name = package.package_type + + if set_package_filename: + # filename uses model base name + package._filename = f"{self.name}.{package.package_type}" + if package._filename in self._package_container.package_filename_dict: + # auto generate a unique file name and register it + file_name = MFFileMgmt.unique_file_name( + package._filename, + self._package_container.package_filename_dict, + ) + package._filename = file_name + + if add_to_package_list: + self._package_container.add_package(package) + + # add obs file to name file if it does not have a parent + if package.package_type in self.structure.package_struct_objs or ( + package.package_type == "obs" and package.parent_file is None + ): + # update model name file + pkg_type = package.package_type.upper() + if ( + package.package_type != "obs" + and self.structure.package_struct_objs[ + package.package_type + ].read_as_arrays + ): + pkg_type = pkg_type[0:-1] + # Model Assumption - assuming all name files have a package + # recarray + file_mgr = self.simulation_data.mfpath + model_rel_path = file_mgr.model_relative_path[self.name] + if model_rel_path != ".": + package_rel_path = os.path.join(model_rel_path, package.filename) + else: + package_rel_path = package.filename + self.name_file.packages.update_record( + [ + f"{pkg_type}6", + package_rel_path, + package.package_name, + ], + 0, + ) + if package_struct is not None: + return (path, package_struct) + else: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Unable to register unsupported file type {} " + "for model {}.".format(package.package_type, self.name) + ) + return None, None + + def load_package( + self, + ftype, + fname, + pname, + strict, + ref_path, + dict_package_name=None, + parent_package: Optional[MFPackage] = None, + ): + """ + Loads a package from a file. This method is used internally by FloPy + and is not intended for the end user. + + Parameters + ---------- + ftype : str + the file type + fname : str + the name of the file containing the package input + pname : str + the user-defined name for the package + strict : bool + strict mode when loading the file + ref_path : str + path to the file. 
uses local path if set to None
+        dict_package_name : str
+            package name for dictionary lookup
+        parent_package : MFPackage
+            parent package
+
+        Examples
+        --------
+        """
+        if ref_path is not None:
+            fname = os.path.join(ref_path, fname)
+        sim_struct = mfstructure.MFStructure().sim_struct
+        if (
+            ftype in self.structure.package_struct_objs
+            and self.structure.package_struct_objs[ftype].multi_package_support
+        ) or (
+            ftype in sim_struct.utl_struct_objs
+            and sim_struct.utl_struct_objs[ftype].multi_package_support
+        ):
+            # resolve dictionary name for package
+            if dict_package_name is not None:
+                if parent_package is not None:
+                    dict_package_name = f"{parent_package.path[-1]}_{ftype}"
+                else:
+                    # use dict_package_name as the base name
+                    if dict_package_name in self._ftype_num_dict:
+                        self._ftype_num_dict[dict_package_name] += 1
+                    else:
+                        self._ftype_num_dict[dict_package_name] = 0
+                    dict_package_name = "{}_{}".format(
+                        dict_package_name,
+                        self._ftype_num_dict[dict_package_name],
+                    )
+            else:
+                # use ftype as the base name
+                if ftype in self._ftype_num_dict:
+                    self._ftype_num_dict[ftype] += 1
+                else:
+                    self._ftype_num_dict[ftype] = 1
+                if pname is not None:
+                    dict_package_name = pname
+                else:
+                    dict_package_name = f"{ftype}-{self._ftype_num_dict[ftype]}"
+        else:
+            dict_package_name = ftype
+
+        # clean up model type text
+        model_type = self.structure.model_type
+        while datautil.DatumUtil.is_int(model_type[-1]):
+            model_type = model_type[0:-1]
+
+        # create package
+        package_obj = PackageContainer.package_factory(ftype, model_type)
+        package = package_obj(
+            self,
+            filename=fname,
+            pname=dict_package_name,
+            loading_package=True,
+            parent_file=parent_package,
+            _internal_package=True,
+        )
+        try:
+            package.load(strict)
+        except ReadAsArraysException:
+            # create ReadAsArrays package and load it instead
+            package_obj = PackageContainer.package_factory(f"{ftype}a", model_type)
+            package = package_obj(
+                self,
+                filename=fname,
+                pname=dict_package_name,
+                loading_package=True,
+                parent_file=parent_package,
+                _internal_package=True,
+            )
+            package.load(strict)
+
+        # register child package with the model
+        self._package_container.add_package(package)
+        if parent_package is not None:
+            # register child package with the parent package
+            parent_package.add_package(package)
+
+        return package
+
+    def plot(self, SelPackList=None, **kwargs):
+        """
+        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
+        model input data from a model instance
+
+        Args:
+            model: Flopy model instance
+            SelPackList: (list) list of package names to plot, if none
+                all packages will be plotted
+
+            **kwargs : dict
+                filename_base : str
+                    Base file name that will be used to automatically generate file
+                    names for output image files. Plots will be exported as image
+                    files if file_name_base is not None. (default is None)
+                file_extension : str
+                    Valid matplotlib.pyplot file extension for savefig(). Only used
+                    if filename_base is not None. (default is 'png')
+                mflay : int
+                    MODFLOW zero-based layer number to return. If None, then all
+                    layers will be included. (default is None)
+                kper : int
+                    MODFLOW zero-based stress period number to return.
+                    (default is zero)
+                key : str
+                    MfList dictionary key. (default is None)
+
+        Returns:
+            axes : list
+                Empty list is returned if filename_base is not None. Otherwise
+                a list of matplotlib.pyplot.axis are returned.
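+
+        Examples
+        --------
+        A minimal sketch, assuming ``sim`` is a previously loaded
+        MFSimulation and the model and package names are illustrative:
+
+        >>> gwf = sim.get_model("uzf01")
+        >>> axes = gwf.plot(SelPackList=["DIS", "NPF"], mflay=0)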
+
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList, **kwargs)
+
+        return axes
+
+    @staticmethod
+    def _resolve_idomain(idomain, botm):
+        if idomain is None:
+            if botm is None:
+                return idomain
+            else:
+                return np.ones_like(botm)
+        return idomain
diff --git a/flopy/mf6/tmp/ruff/mfpackage.py b/flopy/mf6/tmp/ruff/mfpackage.py
new file mode 100644
index 0000000000..6340552af9
--- /dev/null
+++ b/flopy/mf6/tmp/ruff/mfpackage.py
@@ -0,0 +1,3497 @@
+import copy
+import datetime
+import errno
+import inspect
+import os
+import sys
+import warnings
+
+import numpy as np
+
+from ..mbase import ModelInterface
+from ..pakbase import PackageInterface
+from ..utils import datautil
+from ..utils.check import mf6check
+from ..version import __version__
+from .coordinates import modeldimensions
+from .data import (
+    mfdata,
+    mfdataarray,
+    mfdatalist,
+    mfdataplist,
+    mfdatascalar,
+    mfstructure,
+)
+from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal
+from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure
+from .mfbase import (
+    ExtFileAction,
+    FlopyException,
+    MFDataException,
+    MFFileMgmt,
+    MFInvalidTransientBlockHeaderException,
+    PackageContainer,
+    PackageContainerType,
+    ReadAsArraysException,
+    VerbosityLevel,
+)
+from .utils.output_util import MF6Output
+
+
+class MFBlockHeader:
+    """
+    Represents the header of a block in a MF6 input file. This class is used
+    internally by FloPy and its direct use by a user of this library is not
+    recommended.
+
+    Parameters
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+
+    Attributes
+    ----------
+    name : str
+        Block name
+    variable_strings : list
+        List of strings that appear after the block name
+    comment : MFComment
+        Comment text in the block header
+    data_items : list
+        List of MFVariable of the variables contained in this block
+
+    """
+
+    def __init__(
+        self,
+        name,
+        variable_strings,
+        comment,
+        simulation_data=None,
+        path=None,
+        block=None,
+    ):
+        self.name = name
+        self.variable_strings = variable_strings
+        self.block = block
+        if not (
+            (simulation_data is None and path is None)
+            or (simulation_data is not None and path is not None)
+        ):
+            raise FlopyException(
+                "Block header must be initialized with both "
+                "simulation_data and path or with neither."
+ ) + if simulation_data is None: + self.comment = comment + self.simulation_data = None + self.path = path + self.comment_path = None + else: + self.connect_to_dict(simulation_data, path, comment) + # TODO: Get data_items from dictionary + self.data_items = [] + # build block comment paths + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + if isinstance(path, list): + path = tuple(path) + if path is not None: + self.blk_trailing_comment_path = path + ( + name, + "blk_trailing_comment", + ) + self.blk_post_comment_path = path + ( + name, + "blk_post_comment", + ) + if self.blk_trailing_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_trailing_comment_path] = MFComment( + "", "", simulation_data, 0 + ) + if self.blk_post_comment_path not in simulation_data.mfdata: + simulation_data.mfdata[self.blk_post_comment_path] = MFComment( + "\n", "", simulation_data, 0 + ) + else: + self.blk_trailing_comment_path = ("blk_trailing_comment",) + self.blk_post_comment_path = ("blk_post_comment",) + + def __lt__(self, other): + transient_key = self.get_transient_key() + if transient_key is None: + return True + else: + other_key = other.get_transient_key() + if other_key is None: + return False + else: + return transient_key < other_key + + def build_header_variables( + self, + simulation_data, + block_header_structure, + block_path, + data, + dimensions, + ): + """Builds data objects to hold header variables.""" + self.data_items = [] + var_path = block_path + (block_header_structure[0].name,) + + # fix up data + fixed_data = [] + if block_header_structure[0].data_item_structures[0].type == DatumType.keyword: + data_item = block_header_structure[0].data_item_structures[0] + fixed_data.append(data_item.name) + if isinstance(data, tuple): + data = list(data) + if isinstance(data, list): + fixed_data = fixed_data + data + else: + fixed_data.append(data) + if len(fixed_data) > 0: + fixed_data = [tuple(fixed_data)] + # create data object + new_data = self.block.data_factory( + simulation_data, + None, + block_header_structure[0], + True, + var_path, + dimensions, + fixed_data, + ) + + self.add_data_item(new_data, data) + + def add_data_item(self, new_data, data): + """Adds data to the block.""" + self.data_items.append(new_data) + while isinstance(data, list): + if len(data) > 0: + data = data[0] + else: + data = None + if not isinstance(data, tuple): + data = (data,) + self.blk_trailing_comment_path += data + self.blk_post_comment_path += data + + def is_same_header(self, block_header): + """Checks if `block_header` is the same header as this header.""" + if len(self.variable_strings) > 0: + if len(self.variable_strings) != len(block_header.variable_strings): + return False + else: + for sitem, oitem in zip( + self.variable_strings, block_header.variable_strings + ): + if sitem != oitem: + return False + return True + elif len(self.data_items) > 0 and len(block_header.variable_strings) > 0: + typ_obj = self.data_items[0].structure.data_item_structures[0].type_obj + if typ_obj == int or typ_obj == float: + return bool( + self.variable_strings[0] == block_header.variable_strings[0] + ) + else: + return True + elif len(self.data_items) == len(block_header.variable_strings): + return True + return False + + def get_comment(self): + """Get block header comment""" + if self.simulation_data is None: + return self.comment + else: + return self.simulation_data.mfdata[self.comment_path] + + def connect_to_dict(self, 
simulation_data, path, comment=None): + """Add comment to the simulation dictionary""" + self.simulation_data = simulation_data + self.path = path + self.comment_path = path + ("blk_hdr_comment",) + if comment is None: + simulation_data.mfdata[self.comment_path] = self.comment + else: + simulation_data.mfdata[self.comment_path] = comment + self.comment = None + + def write_header(self, fd): + """Writes block header to file object `fd`. + + Parameters + ---------- + fd : file object + File object to write block header to. + + """ + fd.write(f"BEGIN {self.name}") + if len(self.data_items) > 0: + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + one_based = self.data_items[0].structure.type == DatumType.integer + entry = self.data_items[0].get_file_entry( + values_only=True, one_based=one_based + ) + else: + entry = self.data_items[0].get_file_entry() + fd.write(str(entry.rstrip())) + if len(self.data_items) > 1: + for data_item in self.data_items[1:]: + entry = data_item.get_file_entry(values_only=True) + fd.write(str(entry).rstrip()) + if self.get_comment().text: + fd.write(" ") + self.get_comment().write(fd) + fd.write("\n") + + def write_footer(self, fd): + """Writes block footer to file object `fd`. + + Parameters + ---------- + fd : file object + File object to write block footer to. + + """ + fd.write(f"END {self.name}") + if len(self.data_items) > 0: + one_based = self.data_items[0].structure.type == DatumType.integer + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + entry = self.data_items[0].get_file_entry( + values_only=True, one_based=one_based + ) + else: + entry = self.data_items[0].get_file_entry() + fd.write(str(entry.rstrip())) + fd.write("\n") + + def get_transient_key(self, data_path=None): + """Get transient key associated with this block header.""" + transient_key = None + for index in range(0, len(self.data_items)): + if self.data_items[index].structure.type != DatumType.keyword: + if data_path == self.data_items[index].path: + # avoid infinite recursion + return True + transient_key = self.data_items[index].get_data() + if isinstance(transient_key, np.recarray): + item_struct = self.data_items[index].structure + key_index = item_struct.first_non_keyword_index() + if not ( + key_index is not None and len(transient_key[0]) > key_index + ): + if key_index is None: + raise FlopyException( + "Block header index could not be determined." + ) + else: + raise FlopyException( + 'Block header index "{}" ' + 'must be less than "{}"' + ".".format(key_index, len(transient_key[0])) + ) + transient_key = transient_key[0][key_index] + break + return transient_key + + +class MFBlock: + """ + Represents a block in a MF6 input file. This class is used internally + by FloPy and use by users of the FloPy library is not recommended. 
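+
+    Individual blocks can be reached through a package's ``blocks``
+    dictionary (a hypothetical sketch; ``pkg`` is an existing package):
+
+    >>> options = pkg.blocks["options"]
+    >>> list(options.datasets.keys())  # doctest: +SKIP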
+
+    Parameters
+    ----------
+    simulation_data : MFSimulationData
+        Data specific to this simulation
+    dimensions : MFDimensions
+        Describes model dimensions including model grid and simulation time
+    structure : MFVariableStructure
+        Structure describing block
+    path : tuple
+        Unique path to block
+
+    Attributes
+    ----------
+    block_headers : list of MFBlockHeader
+        Block header text (BEGIN/END), header variables, comments in the
+        header
+    structure : MFBlockStructure
+        Structure describing block
+    path : tuple
+        Unique path to block
+    datasets : dict
+        Dictionary of dataset objects with keys that are the name of the
+        dataset
+    datasets_keyword : dict
+        Dictionary of dataset objects with keys that are key words to identify
+        start of dataset
+    enabled : bool
+        If block is being used in the simulation
+
+    """
+
+    def __init__(
+        self,
+        simulation_data,
+        dimensions,
+        structure,
+        path,
+        model_or_sim,
+        container_package,
+    ):
+        self._simulation_data = simulation_data
+        self._dimensions = dimensions
+        self._model_or_sim = model_or_sim
+        self._container_package = container_package
+        self.block_headers = [
+            MFBlockHeader(
+                structure.name,
+                [],
+                MFComment("", path, simulation_data, 0),
+                simulation_data,
+                path,
+                self,
+            )
+        ]
+        self.structure = structure
+        self.path = path
+        self.datasets = {}
+        self.datasets_keyword = {}
+        # initially disable if optional
+        self.enabled = structure.number_non_optional_data() > 0
+        self.loaded = False
+        self.external_file_name = None
+        self._structure_init()
+
+    def __repr__(self):
+        return self._get_data_str(True)
+
+    def __str__(self):
+        return self._get_data_str(False)
+
+    def _get_data_str(self, formal):
+        data_str = ""
+        for dataset in self.datasets.values():
+            if formal:
+                ds_repr = repr(dataset)
+                if len(ds_repr.strip()) > 0:
+                    data_str = f"{data_str}{dataset.structure.name}\n{dataset!r}\n"
+            else:
+                ds_str = str(dataset)
+                if len(ds_str.strip()) > 0:
+                    data_str = f"{data_str}{dataset.structure.name}\n{dataset!s}\n"
+        return data_str
+
+    # return an MFScalar, MFList, or MFArray
+    def data_factory(
+        self,
+        sim_data,
+        model_or_sim,
+        structure,
+        enable,
+        path,
+        dimensions,
+        data=None,
+        package=None,
+    ):
+        """Creates the appropriate data child object derived from MFData."""
+        data_type = structure.get_datatype()
+        # examine the data structure and determine the data type
+        if (
+            data_type == mfstructure.DataType.scalar_keyword
+            or data_type == mfstructure.DataType.scalar
+        ):
+            return mfdatascalar.MFScalar(
+                sim_data,
+                model_or_sim,
+                structure,
+                data,
+                enable,
+                path,
+                dimensions,
+            )
+        elif (
+            data_type == mfstructure.DataType.scalar_keyword_transient
+            or data_type == mfstructure.DataType.scalar_transient
+        ):
+            trans_scalar = mfdatascalar.MFScalarTransient(
+                sim_data, model_or_sim, structure, enable, path, dimensions
+            )
+            if data is not None:
+                trans_scalar.set_data(data, key=0)
+            return trans_scalar
+        elif data_type == mfstructure.DataType.array:
+            return mfdataarray.MFArray(
+                sim_data,
+                model_or_sim,
+                structure,
+                data,
+                enable,
+                path,
+                dimensions,
+                self,
+            )
+        elif data_type == mfstructure.DataType.array_transient:
+            trans_array = mfdataarray.MFTransientArray(
+                sim_data,
+                model_or_sim,
+                structure,
+                enable,
+                path,
+                dimensions,
+                self,
+            )
+            if data is not None:
+                trans_array.set_data(data, key=0)
+            return trans_array
+        elif data_type == mfstructure.DataType.list:
+            if (
+                structure.basic_item
+                and self._container_package.package_type.lower() != "nam"
+                and self._simulation_data.use_pandas
+            ):
+                return 
mfdataplist.MFPandasList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + else: + return mfdatalist.MFList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + elif data_type == mfstructure.DataType.list_transient: + if structure.basic_item and self._simulation_data.use_pandas: + trans_list = mfdataplist.MFPandasTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + else: + trans_list = mfdatalist.MFTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + trans_list.set_data(data, key=0, autofill=True) + return trans_list + elif data_type == mfstructure.DataType.list_multiple: + mult_list = mfdatalist.MFMultipleList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + if data is not None: + mult_list.set_data(data, key=0, autofill=True) + return mult_list + + def _structure_init(self): + # load datasets keywords into dictionary + for dataset_struct in self.structure.data_structures.values(): + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + # load block header data items into dictionary + for dataset in self.structure.block_header_structure: + self._new_dataset(dataset.name, dataset, True, None) + + def set_model_relative_path(self, model_ws): + """Sets `model_ws` as the model path relative to the simulation's + path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. + """ + # update datasets + for key, dataset in self.datasets.items(): + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_name = os.path.split(file_line[0])[1] + file_line[0] = os.path.join(model_ws, old_file_name) + # update block headers + for block_header in self.block_headers: + for dataset in block_header.data_items: + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + "getting file data from " + '"{}"'.format(dataset.structure.name), + ) + + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_path, old_file_name = os.path.split(file_line[1]) + new_file_path = os.path.join(model_ws, old_file_name) + # update transient keys of datasets within the + # block + for key, idataset in self.datasets.items(): + if isinstance(idataset, mfdata.MFTransient): + idataset.update_transient_key( + file_line[1], new_file_path + ) + file_line[1] = os.path.join(model_ws, old_file_name) + + def add_dataset(self, dataset_struct, data, var_path): + """Add data to this block.""" + try: + self.datasets[var_path[-1]] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + var_path, + self._dimensions, + data, + self._container_package, + ) + except 
MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while adding dataset "{}" to block "{}"'.format( + dataset_struct.name, self.structure.name + ), + ) + + self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] + dtype = dataset_struct.get_datatype() + if ( + dtype == mfstructure.DataType.list_transient + or dtype == mfstructure.DataType.list_multiple + or dtype == mfstructure.DataType.array_transient + ): + # build repeating block header(s) + if isinstance(data, dict): + # Add block headers for each dictionary key + for index in data: + if isinstance(index, tuple): + header_list = list(index) + else: + header_list = [index] + self._build_repeating_header(header_list) + elif isinstance(data, list): + # Add a single block header of value 0 + self._build_repeating_header([0]) + elif dtype != mfstructure.DataType.list_multiple and data is not None: + self._build_repeating_header([[0]]) + + return self.datasets[var_path[-1]] + + def _build_repeating_header(self, header_data): + if self.header_exists(header_data[0]): + return + if ( + len(self.block_headers[-1].data_items) == 1 + and self.block_headers[-1].data_items[0].get_data() is not None + ): + block_header_path = self.path + (len(self.block_headers) + 1,) + block_header = MFBlockHeader( + self.structure.name, + [], + MFComment("", self.path, self._simulation_data, 0), + self._simulation_data, + block_header_path, + self, + ) + self.block_headers.append(block_header) + else: + block_header_path = self.path + (len(self.block_headers),) + + struct = self.structure + last_header = self.block_headers[-1] + try: + last_header.build_header_variables( + self._simulation_data, + struct.block_header_structure, + block_header_path, + header_data, + self._dimensions, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while building" + " block header variables for block " + '"{}"'.format(last_header.name), + ) + + def _new_dataset(self, key, dataset_struct, block_header=False, initial_val=None): + dataset_path = self.path + (key,) + if block_header: + if ( + dataset_struct.type == DatumType.integer + and initial_val is not None + and len(initial_val) >= 1 + and dataset_struct.get_record_size()[0] == 1 + ): + # stress periods are stored 0 based + initial_val = int(initial_val[0]) - 1 + if isinstance(initial_val, list): + initial_val_path = tuple(initial_val) + initial_val = [tuple(initial_val)] + else: + initial_val_path = initial_val + try: + new_data = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while adding" + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, self.structure.name), + ) + self.block_headers[-1].add_data_item(new_data, initial_val_path) + + else: + try: + self.datasets[key] = self.data_factory( + self._simulation_data, + self._model_or_sim, + dataset_struct, + True, + dataset_path, + self._dimensions, + initial_val, + self._container_package, + ) + except MFDataException as mfde: + raise 
MFDataException(
+                    mfdata_except=mfde,
+                    model=self._container_package.model_name,
+                    package=self._container_package._get_pname(),
+                    message="Error occurred while adding"
+                    ' dataset "{}" to block '
+                    '"{}"'.format(dataset_struct.name, self.structure.name),
+                )
+            for keyword in dataset_struct.get_keywords():
+                self.datasets_keyword[keyword] = dataset_struct
+
+    def is_empty(self):
+        """Returns true if this block is empty."""
+        for key, dataset in self.datasets.items():
+            try:
+                has_data = dataset.has_data()
+            except MFDataException as mfde:
+                raise MFDataException(
+                    mfdata_except=mfde,
+                    model=self._container_package.model_name,
+                    package=self._container_package._get_pname(),
+                    message="Error occurred while verifying"
+                    ' data of dataset "{}" in block '
+                    '"{}"'.format(dataset.structure.name, self.structure.name),
+                )
+
+            if has_data is not None and has_data:
+                return False
+        return True
+
+    def load(self, block_header, fd, strict=True):
+        """Loads block from a file object. The file object must be advanced
+        to the beginning of the block before calling.
+
+        Parameters
+        ----------
+        block_header : MFBlockHeader
+            Block header for the block being loaded.
+        fd : file
+            File descriptor of file being loaded
+        strict : bool
+            Enforce strict MODFLOW 6 file format.
+        """
+        # verify number of header variables
+        if (
+            len(block_header.variable_strings)
+            < self.structure.number_non_optional_block_header_data()
+        ):
+            if (
+                self._simulation_data.verbosity_level.value
+                >= VerbosityLevel.normal.value
+            ):
+                warning_str = (
+                    'WARNING: Block header for block "{}" does not '
+                    "contain the correct number of "
+                    "variables {}".format(block_header.name, self.path)
+                )
+                print(warning_str)
+            return
+
+        if self.loaded:
+            # verify header has not already been loaded
+            for bh_current in self.block_headers:
+                if bh_current.is_same_header(block_header):
+                    if (
+                        self._simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        warning_str = (
+                            'WARNING: Block header for block "{}" is '
+                            "not a unique block header "
+                            "{}".format(block_header.name, self.path)
+                        )
+                        print(warning_str)
+                    return
+
+        # init
+        self.enabled = True
+        if not self.loaded:
+            self.block_headers = []
+        block_header.block = self
+        self.block_headers.append(block_header)
+
+        # process any header variables
+        if len(self.structure.block_header_structure) > 0:
+            dataset = self.structure.block_header_structure[0]
+            self._new_dataset(
+                dataset.name,
+                dataset,
+                True,
+                self.block_headers[-1].variable_strings,
+            )
+
+        # handle special readasarrays case
+        if self._container_package.structure.read_as_arrays or (
+            hasattr(self._container_package, "aux")
+            and self._container_package.aux.structure.layered
+        ):
+            # auxiliary variables may appear with aux variable name as keyword
+            aux_vars = self._container_package.auxiliary.get_data()
+            if aux_vars is not None:
+                for var_name in list(aux_vars[0])[1:]:
+                    self.datasets_keyword[(var_name,)] = (
+                        self._container_package.aux.structure
+                    )
+
+        comments = []
+
+        # capture any initial comments
+        initial_comment = MFComment("", "", 0)
+        fd_block = fd
+        line = fd_block.readline()
+        datautil.PyListUtil.reset_delimiter_used()
+        arr_line = datautil.PyListUtil.split_data_line(line)
+        post_data_comments = MFComment("", "", self._simulation_data, 0)
+        while MFComment.is_comment(line, True):
+            initial_comment.add_text(line)
+            line = fd_block.readline()
+            arr_line = datautil.PyListUtil.split_data_line(line)
+
+        # if block not empty
+        external_file_info = None
+        if not (len(arr_line[0]) > 2 and 
arr_line[0][:3].upper() == "END"): + if arr_line[0].lower() == "open/close": + # open block contents from external file + fd_block.readline() + root_path = self._simulation_data.mfpath.get_sim_path() + try: + file_name = os.path.split(arr_line[1])[-1] + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f' opening external file "{file_name}"...') + external_file_info = arr_line + except: + type_, value_, traceback_ = sys.exc_info() + message = f'Error reading external file specified in line "{line}"' + raise MFDataException( + self._container_package.model_name, + self._container_package._get_pname(), + self.path, + "reading external file", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + if len(self.structure.data_structures) <= 1: + # load a single data set + dataset = self.datasets[next(iter(self.datasets))] + try: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading data {dataset.structure.name}...") + next_line = dataset.load( + line, + fd_block, + self.block_headers[-1], + initial_comment, + external_file_info, + ) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while loading data "{}" in ' + 'block "{}" from file "{}"' + ".".format( + dataset.structure.name, + self.structure.name, + fd_block.name, + ), + ) + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading child package {package_info[0]}...") + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + package_info[1], + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package(pkg, pkg.filename, False) + + if next_line[1] is not None: + arr_line = datautil.PyListUtil.split_data_line(next_line[1]) + else: + arr_line = "" + # capture any trailing comments + dataset.post_data_comments = post_data_comments + while arr_line and ( + len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" + ): + next_line[1] = fd_block.readline().strip() + arr_line = datautil.PyListUtil.split_data_line(next_line[1]) + if arr_line and ( + len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" + ): + post_data_comments.add_text(" ".join(arr_line)) + else: + # look for keyword and store line as data or comment + try: + key, results = self._find_data_by_keyword( + line, fd_block, initial_comment + ) + except MFInvalidTransientBlockHeaderException as e: + warning_str = f"WARNING: {e}" + print(warning_str) + self.block_headers.pop() + return + + self._save_comments(arr_line, line, key, comments) + if results[1] is None or results[1][:3].upper() != "END": + # block consists of unordered datasets + # load the data sets out of order based on + # initial constants + line = " " + while line != "": + line = fd_block.readline() + arr_line = 
datautil.PyListUtil.split_data_line(line)
+                        if arr_line:
+                            # determine if at end of block
+                            if (
+                                len(arr_line[0]) > 2
+                                and arr_line[0][:3].upper() == "END"
+                            ):
+                                break
+                            # look for keyword and store line as data or
+                            # comment
+                            key, result = self._find_data_by_keyword(
+                                line, fd_block, initial_comment
+                            )
+                            self._save_comments(arr_line, line, key, comments)
+                            if result[1] is not None and result[1][:3].upper() == "END":
+                                break
+        else:
+            # block empty, store empty array in block variables
+            empty_arr = []
+            for ds in self.datasets.values():
+                if isinstance(ds, mfdata.MFTransient):
+                    transient_key = block_header.get_transient_key()
+                    ds.set_data(empty_arr, key=transient_key)
+        self.loaded = True
+        self.is_valid()
+
+    def _find_data_by_keyword(self, line, fd, initial_comment):
+        first_key = None
+        nothing_found = False
+        next_line = [True, line]
+        while next_line[0] and not nothing_found:
+            arr_line = datautil.PyListUtil.split_data_line(next_line[1])
+            key = datautil.find_keyword(arr_line, self.datasets_keyword)
+            if key is not None:
+                ds_name = self.datasets_keyword[key].name
+                try:
+                    if (
+                        self._simulation_data.verbosity_level.value
+                        >= VerbosityLevel.verbose.value
+                    ):
+                        print(f" loading data {ds_name}...")
+                    next_line = self.datasets[ds_name].load(
+                        next_line[1],
+                        fd,
+                        self.block_headers[-1],
+                        initial_comment,
+                    )
+                except MFDataException as mfde:
+                    raise MFDataException(
+                        mfdata_except=mfde,
+                        model=self._container_package.model_name,
+                        package=self._container_package._get_pname(),
+                        message="Error occurred while "
+                        'loading data "{}" in '
+                        'block "{}" from file "{}"'
+                        ".".format(ds_name, self.structure.name, fd.name),
+                    )
+
+                # see if first item's name indicates a reference to
+                # another package
+                package_info_list = self._get_package_info(self.datasets[ds_name])
+                if package_info_list is not None:
+                    for package_info in package_info_list:
+                        if (
+                            self._simulation_data.verbosity_level.value
+                            >= VerbosityLevel.verbose.value
+                        ):
+                            print(f" loading child package {package_info[1]}...")
+                        fname = package_info[1]
+                        if package_info[2] is not None:
+                            fname = os.path.join(package_info[2], fname)
+                        filemgr = self._simulation_data.mfpath
+                        fname = filemgr.strip_model_relative_path(
+                            self._model_or_sim.name, fname
+                        )
+                        pkg = self._model_or_sim.load_package(
+                            package_info[0],
+                            fname,
+                            package_info[1],
+                            True,
+                            "",
+                            package_info[3],
+                            self._container_package,
+                        )
+                        if hasattr(self._container_package, package_info[0]):
+                            package_group = getattr(
+                                self._container_package, package_info[0]
+                            )
+                            package_group._append_package(pkg, pkg.filename, False)
+                if first_key is None:
+                    first_key = key
+                nothing_found = False
+            elif (
+                arr_line[0].lower() == "readasarrays"
+                and self.path[-1].lower() == "options"
+                and self._container_package.structure.read_as_arrays is False
+            ):
+                error_msg = (
+                    "ERROR: Attempting to read a ReadAsArrays "
+                    "package as a non-ReadAsArrays "
+                    "package {}".format(self.path)
+                )
+                raise ReadAsArraysException(error_msg)
+            else:
+                nothing_found = True
+
+        if first_key is None:
+            # look for recarrays. 
if there is a lone recarray in this block, + # use it by default + recarrays = self.structure.get_all_recarrays() + if len(recarrays) != 1: + return key, [None, None] + dataset = self.datasets[recarrays[0].name] + ds_result = dataset.load(line, fd, self.block_headers[-1], initial_comment) + + # see if first item's name indicates a reference to another + # package + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading child package {package_info[0]}...") + fname = package_info[1] + if package_info[2] is not None: + fname = os.path.join(package_info[2], fname) + filemgr = self._simulation_data.mfpath + fname = filemgr.strip_model_relative_path( + self._model_or_sim.name, fname + ) + pkg = self._model_or_sim.load_package( + package_info[0], + fname, + None, + True, + "", + package_info[3], + self._container_package, + ) + if hasattr(self._container_package, package_info[0]): + package_group = getattr( + self._container_package, package_info[0] + ) + package_group._append_package(pkg, pkg.filename, False) + + return recarrays[0].keyword, ds_result + else: + return first_key, next_line + + def _get_package_info(self, dataset): + if not dataset.structure.file_data: + return None + for index in range(0, len(dataset.structure.data_item_structures)): + data_item = dataset.structure.data_item_structures[index] + if ( + data_item.type == DatumType.keyword + or data_item.type == DatumType.string + ): + item_name = data_item.name + package_type = item_name[:-1] + model_type = self._model_or_sim.structure.model_type + # not all packages have the same naming convention + # try different naming conventions to find the appropriate + # package + package_types = [ + package_type, + f"{self._container_package.package_type}{package_type}", + ] + package_type_found = None + for ptype in package_types: + if PackageContainer.package_factory(ptype, model_type) is not None: + package_type_found = ptype + break + if package_type_found is not None: + try: + data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error occurred while " + 'getting data from "{}" ' + 'in block "{}".'.format( + dataset.structure.name, self.structure.name + ), + ) + package_info_list = [] + if isinstance(data, np.recarray): + for row in data: + self._add_to_info_list( + package_info_list, + row[index], + package_type_found, + ) + else: + self._add_to_info_list( + package_info_list, data, package_type_found + ) + + return package_info_list + return None + + def _add_to_info_list(self, package_info_list, file_location, package_type_found): + file_path, file_name = os.path.split(file_location) + dict_package_name = f"{package_type_found}_{self.path[-2]}" + package_info_list.append( + ( + package_type_found, + file_name, + file_path, + dict_package_name, + ) + ) + + def _save_comments(self, arr_line, line, key, comments): + # FIX: Save these comments somewhere in the data set + if key not in self.datasets_keyword: + if MFComment.is_comment(key, True): + if comments: + comments.append("\n") + comments.append(arr_line) + + def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes block to a file object. + + Parameters + ---------- + fd : file object + File object to write to. 
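+
+        Examples
+        --------
+        A hypothetical sketch writing a block to an in-memory buffer
+        (``pkg`` is an existing package object):
+
+        >>> import io
+        >>> buf = io.StringIO()
+        >>> pkg.blocks["options"].write(buf)
+        >>> print(buf.getvalue())  # doctest: +SKIP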
+ + """ + # never write an empty block + is_empty = self.is_empty() + if ( + is_empty + and self.structure.name.lower() != "exchanges" + and self.structure.name.lower() != "options" + and self.structure.name.lower() != "sources" + and self.structure.name.lower() != "stressperioddata" + ): + return + if self.structure.repeating(): + repeating_datasets = self._find_repeating_datasets() + for repeating_dataset in repeating_datasets: + # resolve any missing block headers + self._add_missing_block_headers(repeating_dataset) + for block_header in sorted(self.block_headers): + # write block + self._write_block(fd, block_header, ext_file_action) + else: + self._write_block(fd, self.block_headers[0], ext_file_action) + + def _add_missing_block_headers(self, repeating_dataset): + key_data_list = repeating_dataset.get_active_key_list() + # assemble a dictionary of data keys and empty keys + key_dict = {} + for key in key_data_list: + key_dict[key[0]] = True + for key, value in repeating_dataset.empty_keys.items(): + if value: + key_dict[key] = True + for key in key_dict.keys(): + has_data = repeating_dataset.has_data(key) + empty_key = ( + key in repeating_dataset.empty_keys + and repeating_dataset.empty_keys[key] + ) + if not self.header_exists(key) and (has_data or empty_key): + self._build_repeating_header([key]) + + def header_exists(self, key, data_path=None): + if not isinstance(key, list): + if key is None: + return + comp_key_list = [key] + else: + comp_key_list = key + for block_header in self.block_headers: + transient_key = block_header.get_transient_key(data_path) + if transient_key is True: + return + for comp_key in comp_key_list: + if transient_key is not None and transient_key == comp_key: + return True + return False + + def set_all_data_external( + self, + base_name, + check_data=True, + external_data_folder=None, + binary=False, + ): + """Sets the block's list and array data to be stored externally, + base_name is external file name's prefix, check_data determines + if data error checking is enabled during this process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + base_name : str + Base file name of external files where data will be written to. + check_data : bool + Whether to do data error checking. 
+ external_data_folder + Folder where external data will be stored + binary: bool + Whether file will be stored as binary + + """ + + for key, dataset in self.datasets.items(): + lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( + dataset, mfdataplist.MFPandasList + ) + if ( + isinstance(dataset, mfdataarray.MFArray) + or (lst_data and dataset.structure.type == DatumType.recarray) + and dataset.enabled + ): + if not binary or ( + lst_data + and ( + dataset.data_dimensions.package_dim.boundnames() + or not dataset.structure.basic_item + ) + ): + ext = "txt" + binary = False + else: + ext = "bin" + file_path = f"{base_name}_{dataset.structure.name}.{ext}" + replace_existing_external = False + if external_data_folder is not None: + # get simulation root path + root_path = self._simulation_data.mfpath.get_sim_path() + # get model relative path, if it exists + if isinstance(self._model_or_sim, ModelInterface): + name = self._model_or_sim.name + rel_path = self._simulation_data.mfpath.model_relative_path[ + name + ] + if rel_path is not None: + root_path = os.path.join(root_path, rel_path) + full_path = os.path.join(root_path, external_data_folder) + if not os.path.exists(full_path): + # create new external data folder + os.makedirs(full_path) + file_path = os.path.join(external_data_folder, file_path) + replace_existing_external = True + dataset.store_as_external_file( + file_path, + replace_existing_external=replace_existing_external, + check_data=check_data, + binary=binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the block's list and array data to be stored internally, + check_data determines if data error checking is enabled during this + process. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + + Parameters + ---------- + check_data : bool + Whether to do data error checking. 
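+
+        Examples
+        --------
+        A hypothetical sketch (``block`` is an existing MFBlock instance):
+
+        >>> block.set_all_data_internal(check_data=False)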
+ + """ + + for key, dataset in self.datasets.items(): + if ( + isinstance(dataset, mfdataarray.MFArray) + or ( + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) + and dataset.structure.type == DatumType.recarray + ) + and dataset.enabled + ): + dataset.store_internal(check_data=check_data) + + def _find_repeating_datasets(self): + repeating_datasets = [] + for key, dataset in self.datasets.items(): + if dataset.repeating: + repeating_datasets.append(dataset) + return repeating_datasets + + def _prepare_external(self, fd, file_name, binary=False): + fd_main = fd + fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) + # resolve full file and folder path + fd_file_path = os.path.join(fd_path, file_name) + fd_folder_path = os.path.split(fd_file_path)[0] + if fd_folder_path != "": + if not os.path.exists(fd_folder_path): + # create new external data folder + os.makedirs(fd_folder_path) + return fd_main, fd_file_path + + def _write_block(self, fd, block_header, ext_file_action): + transient_key = None + basic_list = False + dataset_one = list(self.datasets.values())[0] + if isinstance( + dataset_one, + (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), + ): + basic_list = True + for dataset in self.datasets.values(): + assert isinstance( + dataset, + ( + mfdataplist.MFPandasList, + mfdataplist.MFPandasTransientList, + ), + ) + # write block header + block_header.write_header(fd) + if len(block_header.data_items) > 0: + transient_key = block_header.get_transient_key() + + # gather data sets to write + data_set_output = [] + data_found = False + for key, dataset in self.datasets.items(): + try: + if transient_key is None: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" writing data {dataset.structure.name}...") + if basic_list: + ext_fname = dataset.external_file_name() + if ext_fname is not None: + binary = dataset.binary_ext_data() + # write block contents to external file + fd_main, fd = self._prepare_external(fd, ext_fname, binary) + dataset.write_file_entry(fd, fd_main=fd_main) + fd = fd_main + else: + dataset.write_file_entry(fd) + else: + data_set_output.append( + dataset.get_file_entry(ext_file_action=ext_file_action) + ) + data_found = True + else: + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print( + " writing data {} ({})...".format( + dataset.structure.name, transient_key + ) + ) + if basic_list: + ext_fname = dataset.external_file_name(transient_key) + if ext_fname is not None: + binary = dataset.binary_ext_data(transient_key) + # write block contents to external file + fd_main, fd = self._prepare_external(fd, ext_fname, binary) + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + fd = fd_main + else: + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + ) + else: + if dataset.repeating: + output = dataset.get_file_entry( + transient_key, ext_file_action=ext_file_action + ) + if output is not None: + data_set_output.append(output) + data_found = True + else: + data_set_output.append( + dataset.get_file_entry(ext_file_action=ext_file_action) + ) + data_found = True + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message=( + "Error occurred while writing data " + f'"{dataset.structure.name}" 
in block ' + f'"{self.structure.name}" to file "{fd.name}"' + ), + ) + if not data_found: + return + if not basic_list: + # write block header + block_header.write_header(fd) + + if self.external_file_name is not None: + indent_string = self._simulation_data.indent_string + fd.write(f'{indent_string}open/close "{self.external_file_name}"\n') + # write block contents to external file + fd_main, fd = self._prepare_external(fd, self.external_file_name) + # write data sets + for output in data_set_output: + fd.write(output) + + # write trailing comments + pth = block_header.blk_trailing_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + if self.external_file_name is not None and not basic_list: + # switch back writing to package file + fd.close() + fd = fd_main + + # write block footer + block_header.write_footer(fd) + + # write post block comments + pth = block_header.blk_post_comment_path + if pth in self._simulation_data.mfdata: + self._simulation_data.mfdata[pth].write(fd) + + # write extra line if comments are off + if not self._simulation_data.comments_on: + fd.write("\n") + + def is_allowed(self): + """Determine if block is valid based on the values of dependent + MODFLOW variables.""" + if self.structure.variable_dependant_path: + # fill in empty part of the path with the current path + if len(self.structure.variable_dependant_path) == 3: + dependant_var_path = ( + self.path[0], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 2: + dependant_var_path = ( + self.path[0], + self.path[1], + ) + self.structure.variable_dependant_path + elif len(self.structure.variable_dependant_path) == 1: + dependant_var_path = ( + self.path[0], + self.path[1], + self.path[2], + ) + self.structure.variable_dependant_path + else: + dependant_var_path = None + + # get dependency + dependant_var = None + mf_data = self._simulation_data.mfdata + if dependant_var_path in mf_data: + dependant_var = mf_data[dependant_var_path] + + # resolve dependency + if self.structure.variable_value_when_active[0] == "Exists": + exists = self.structure.variable_value_when_active[1] + if dependant_var and exists.lower() == "true": + return True + elif not dependant_var and exists.lower() == "false": + return True + else: + return False + elif not dependant_var: + return False + elif self.structure.variable_value_when_active[0] == ">": + min_val = self.structure.variable_value_when_active[1] + if dependant_var > float(min_val): + return True + else: + return False + elif self.structure.variable_value_when_active[0] == "<": + max_val = self.structure.variable_value_when_active[1] + if dependant_var < float(max_val): + return True + else: + return False + return True + + def is_valid(self): + """ + Returns true if the block is valid. 
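+
+        Returns
+        -------
+        bool
+            True if all required datasets and block header variables are
+            enabled and valid, otherwise False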
+        """
+        # check data sets
+        for dataset in self.datasets.values():
+            # Non-optional datasets must be enabled
+            if not dataset.structure.optional and not dataset.enabled:
+                return False
+            # Enabled blocks must be valid
+            if dataset.enabled and not dataset.is_valid:
+                return False
+        # check variables
+        for block_header in self.block_headers:
+            for dataset in block_header.data_items:
+                # Non-optional datasets must be enabled
+                if not dataset.structure.optional and not dataset.enabled:
+                    return False
+                # Enabled blocks must be valid
+                if dataset.enabled and not dataset.is_valid():
+                    return False
+        # all checks passed
+        return True
+
+
+class MFPackage(PackageInterface):
+    """
+    Provides an interface for the user to specify data to build a package.
+
+    Parameters
+    ----------
+    parent : MFModel, MFSimulation, or MFPackage
+        The parent model, simulation, or package containing this package
+    package_type : str
+        String defining the package type
+    filename : str or PathLike
+        Name or path of file where this package is stored
+    quoted_filename : str
+        Filename with quotes around it when there is a space in the name
+    pname : str
+        Package name
+    loading_package : bool
+        Whether or not to add this package to the parent container's package
+        list during initialization
+
+    Attributes
+    ----------
+    blocks : dict
+        Dictionary of blocks contained in this package by block name
+    path : tuple
+        Data dictionary path to this package
+    structure : PackageStructure
+        Describes the blocks and data contained in this package
+    dimensions : PackageDimension
+        Resolves data dimensions for data within this package
+
+    """
+
+    def __init__(
+        self,
+        parent,
+        package_type,
+        filename=None,
+        pname=None,
+        loading_package=False,
+        **kwargs,
+    ):
+        parent_file = kwargs.pop("parent_file", None)
+        if isinstance(parent, MFPackage):
+            self.model_or_sim = parent.model_or_sim
+            self.parent_file = parent
+        elif parent_file is not None:
+            self.model_or_sim = parent
+            self.parent_file = parent_file
+        else:
+            self.model_or_sim = parent
+            self.parent_file = None
+        _internal_package = kwargs.pop("_internal_package", False)
+        if _internal_package:
+            self.internal_package = True
+        else:
+            self.internal_package = False
+        self._data_list = []
+        self._package_type = package_type
+        if self.model_or_sim.type == "Model" and package_type.lower() != "nam":
+            self.model_name = self.model_or_sim.name
+        else:
+            self.model_name = None
+
+        # a package must have a dfn_file_name
+        if not hasattr(self, "dfn_file_name"):
+            self.dfn_file_name = ""
+
+        if self.model_or_sim.type != "Model" and self.model_or_sim.type != "Simulation":
+            message = (
+                "Invalid model_or_sim parameter. Expecting either a "
+                'model or a simulation. Instead type "{}" was '
+                "given.".format(type(self.model_or_sim))
+            )
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.model_name,
+                pname,
+                "",
+                "initializing package",
+                None,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                message,
+                self.model_or_sim.simulation_data.debug,
+            )
+
+        self._package_container = PackageContainer(self.model_or_sim.simulation_data)
+        self.simulation_data = self.model_or_sim.simulation_data
+
+        self.blocks = {}
+        self.container_type = []
+        self.loading_package = loading_package
+        if pname is not None:
+            if not isinstance(pname, str):
+                message = (
+                    "Invalid pname parameter. Expecting type str. 
" + 'Instead type "{}" was ' + "given.".format(type(pname)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + + self.package_name = pname.lower() + else: + self.package_name = None + + if filename is None: + if self.model_or_sim.type == "Simulation": + # filename uses simulation base name + base_name = os.path.basename(os.path.normpath(self.model_or_sim.name)) + self._filename = f"{base_name}.{package_type}" + else: + # filename uses model base name + self._filename = f"{self.model_or_sim.name}.{package_type}" + else: + if not isinstance(filename, (str, os.PathLike)): + message = ( + "Invalid fname parameter. Expecting type str. " + 'Instead type "{}" was ' + "given.".format(type(filename)) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + pname, + "", + "initializing package", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.model_or_sim.simulation_data.debug, + ) + self._filename = datautil.clean_filename(str(filename).replace("\\", "/")) + self.path, self.structure = self.model_or_sim.register_package( + self, not loading_package, pname is None, filename is None + ) + self.dimensions = self.create_package_dimensions() + + if self.path is None: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "WARNING: Package type {} failed to register property. {}".format( + self._package_type, self.path + ) + ) + if self.parent_file is not None: + self.container_type.append(PackageContainerType.package) + # init variables that may be used later + self.post_block_comments = None + self.last_error = None + self.bc_color = "black" + self.__inattr = False + self._child_package_groups = {} + child_builder_call = kwargs.pop("child_builder_call", None) + if ( + self.parent_file is not None + and child_builder_call is None + and package_type in self.parent_file._child_package_groups + ): + # initialize as part of the parent's child package group + chld_pkg_grp = self.parent_file._child_package_groups[package_type] + chld_pkg_grp.init_package(self, self._filename, False) + + # remove any remaining valid kwargs + key_list = list(kwargs.keys()) + for key in key_list: + if "filerecord" in key and hasattr(self, f"{key}"): + kwargs.pop(f"{key}") + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ", ".join(kwargs.keys()) + excpt_str = f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
+        raise FlopyException(excpt_str)
+
+    def __init_subclass__(cls):
+        """Register package type"""
+        super().__init_subclass__()
+        PackageContainer.packages_by_abbr[cls.package_abbr] = cls
+
+    def __setattr__(self, name, value):
+        if hasattr(self, name) and getattr(self, name) is not None:
+            attribute = object.__getattribute__(self, name)
+            if isinstance(attribute, mfdata.MFData):
+                try:
+                    if isinstance(attribute, mfdatalist.MFList):
+                        attribute.set_data(value, autofill=True)
+                    else:
+                        attribute.set_data(value)
+                except MFDataException as mfde:
+                    raise MFDataException(
+                        mfdata_except=mfde,
+                        model=self.model_name,
+                        package=self._get_pname(),
+                    )
+                return
+
+        if all(hasattr(self, attr) for attr in ["model_or_sim", "_package_type"]):
+            if hasattr(self.model_or_sim, "_mg_resync"):
+                if not self.model_or_sim._mg_resync:
+                    self.model_or_sim._mg_resync = self._mg_resync
+
+        super().__setattr__(name, value)
+
+    def __repr__(self):
+        return self._get_data_str(True)
+
+    def __str__(self):
+        return self._get_data_str(False)
+
+    @property
+    def filename(self):
+        """Package's file name."""
+        return self._filename
+
+    @property
+    def quoted_filename(self):
+        """Package's file name with quotes if there is a space."""
+        if " " in self._filename:
+            return f'"{self._filename}"'
+        return self._filename
+
+    @filename.setter
+    def filename(self, fname):
+        """Package's file name."""
+        if (
+            isinstance(self.parent_file, MFPackage)
+            and self.package_type in self.parent_file._child_package_groups
+        ):
+            fname = datautil.clean_filename(fname)
+            try:
+                child_pkg_group = self.parent_file._child_package_groups[
+                    self.structure.file_type
+                ]
+                child_pkg_group._update_filename(self._filename, fname)
+            except Exception:
+                print(
+                    "WARNING: Unable to update file name for parent "
+                    f"package of {self.package_name}."
+                )
+        if self.model_or_sim is not None and fname is not None:
+            if self._package_type != "nam":
+                self.model_or_sim.update_package_filename(self, fname)
+            self._filename = fname
+
+    @property
+    def package_type(self):
+        """String describing type of package"""
+        return self._package_type
+
+    @property
+    def name(self):
+        """Name of package"""
+        return [self.package_name]
+
+    @name.setter
+    def name(self, name):
+        """Name of package"""
+        self.package_name = name
+
+    @property
+    def parent(self):
+        """Parent package"""
+        return self.model_or_sim
+
+    @parent.setter
+    def parent(self, parent):
+        """Parent package"""
+        raise AttributeError("Do not use this setter to set the parent")
+
+    @property
+    def plottable(self):
+        """If package is plottable"""
+        return self.model_or_sim.type != "Simulation"
+
+    @property
+    def output(self):
+        """
+        Gets the output object associated with this package.
+
+        Returns
+        -------
+        MF6Output object
+        """
+        return MF6Output(self)
+
+    @property
+    def data_list(self):
+        """List of data in this package."""
+        # return [data_object, data_object, ...]
+        return self._data_list
+
+    @property
+    def package_key_dict(self):
+        """
+        .. deprecated:: 3.9
+            This method is for internal use only and will be deprecated.
+        """
+        warnings.warn(
+            "This method is for internal use only and will be deprecated.",
+            category=DeprecationWarning,
+        )
+        return self._package_container.package_type_dict
+
+    @property
+    def package_names(self):
+        """Returns a list of package names.
+
+        .. deprecated:: 3.9
+            This method is for internal use only and will be deprecated. 
+ """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_names + + @property + def package_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_dict + + @property + def package_type_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_type_dict + + @property + def package_name_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_name_dict + + @property + def package_filename_dict(self): + """ + .. deprecated:: 3.9 + This method is for internal use only and will be deprecated. + """ + warnings.warn( + "This method is for internal use only and will be deprecated.", + category=DeprecationWarning, + ) + return self._package_container.package_filename_dict + + def netcdf_attrs(self, mesh=None): + attrs = {} + + def attr_d(tagname, iaux=None, layer=None): + tag = tagname + name = f"{self.package_name}" + if iaux: + auxvar = self.dimensions.get_aux_variables()[0] + tag = f"{tag}/{iaux}" + name = f"{name}_{auxvar[iaux]}" + else: + name = f"{name}_{tagname}" + if layer: + tag = f"{tag}/layer{layer}" + name = f"{name}_l{layer}" + + a = {} + a["varname"] = name + a["attrs"] = {} + a["attrs"]["modflow_input"] = ( + f"{self.model_name}/{self.package_name}/{tagname}" + ).upper() + if iaux: + a["attrs"]["modflow_iaux"] = iaux + if layer: + a["attrs"]["layer"] = layer + return tag, a + + for key, block in self.blocks.items(): + if key != "griddata" and key != "period": + continue + for dataset in block.datasets.values(): + if isinstance(dataset, mfdataarray.MFArray): + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if not (dataset.structure.netcdf and dataset.has_data()): + continue + if dataset.structure.layered and mesh == "LAYERED": + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate( + self.dimensions.get_aux_variables()[0] + ): + if auxname == "auxiliary" and n == 0: + continue + for l in range(self.model_or_sim.modelgrid.nlay): + key, a = attr_d(data_item.name, n, l + 1) + attrs[key] = a + else: + for l in range(self.model_or_sim.modelgrid.nlay): + key, a = attr_d(data_item.name, layer=l + 1) + attrs[key] = a + else: + if data_item.name == "aux" or data_item.name == "auxvar": + for n, auxname in enumerate( + self.dimensions.get_aux_variables()[0] + ): + if auxname == "auxiliary" and n == 0: + continue + key, a = attr_d(data_item.name, iaux=n) + attrs[key] = a + else: + key, a = attr_d(data_item.name) + attrs[key] = a + return attrs + + def get_package(self, name=None, type_only=False, name_only=False): + """ + Finds a package by package name, package key, package type, or partial + package name. returns either a single package, a list of packages, + or None. + + Parameters + ---------- + name : str + Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. 
+ type_only : bool + Search for package by type only + name_only : bool + Search for package by name only + + Returns + ------- + pp : Package object + + """ + return self._package_container.get_package(name, type_only, name_only) + + def add_package(self, package): + pkg_type = package.package_type.lower() + if pkg_type in self._package_container.package_type_dict: + for existing_pkg in self._package_container.package_type_dict[pkg_type]: + if existing_pkg is package: + # do not add the same package twice + return + self._package_container.add_package(package) + + def _get_aux_data(self, aux_names): + if hasattr(self, "stress_period_data"): + spd = self.stress_period_data.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "packagedata"): + pd = self.packagedata.get_data() + if aux_names[0][1] in pd.dtype.names: + return pd + if hasattr(self, "perioddata"): + spd = self.perioddata.get_data() + if ( + 0 in spd + and spd[0] is not None + and aux_names[0][1] in spd[0].dtype.names + ): + return spd + if hasattr(self, "aux"): + return self.aux.get_data() + return None + + def _boundnames_active(self): + if hasattr(self, "boundnames"): + if self.boundnames.get_data(): + return True + return False + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Data check, returns True on success. + + Warning + ------- + The MF6 check mechanism is deprecated pending reimplementation + in a future release. While the checks API will remain in place + through 3.x, it may be unstable, and will likely change in 4.x. + """ + + if checktype is None: + checktype = mf6check + # do general checks + chk = super().check(f, verbose, level, checktype) + + # do mf6 specific checks + if hasattr(self, "auxiliary"): + # auxiliary variable check + # check if auxiliary variables are defined + aux_names = self.auxiliary.get_data() + if aux_names is not None and len(aux_names[0]) > 1: + num_aux_names = len(aux_names[0]) - 1 + # check for stress period data + aux_data = self._get_aux_data(aux_names) + if aux_data is not None and len(aux_data) > 0: + # make sure the check object exists + if chk is None: + chk = self._get_check(f, verbose, level, checktype) + if isinstance(aux_data, dict): + aux_datasets = list(aux_data.values()) + else: + aux_datasets = [aux_data] + dataset_type = "unknown" + for dataset in aux_datasets: + if isinstance(dataset, np.recarray): + dataset_type = "recarray" + break + elif isinstance(dataset, np.ndarray): + dataset_type = "ndarray" + break + # if aux data is in a list + if dataset_type == "recarray": + # check for time series data + time_series_name_dict = {} + if hasattr(self, "ts") and hasattr( + self.ts, "time_series_namerecord" + ): + # build dictionary of time series data variables + ts_nr = self.ts.time_series_namerecord.get_data() + if ts_nr is not None: + for item in ts_nr: + if len(item) > 0 and item[0] is not None: + time_series_name_dict[item[0]] = True + # auxiliary variables are last unless boundnames + # defined, then second to last + if self._boundnames_active(): + offset = 1 + else: + offset = 0 + + # loop through stress period datasets with aux data + for data in aux_datasets: + if isinstance(data, np.recarray): + for row in data: + row_size = len(row) + aux_start_loc = ( + row_size - num_aux_names - offset - 1 + ) + # loop through auxiliary variables + for idx, var in enumerate(list(aux_names[0])[1:]): + # get index of current aux variable + data_index = aux_start_loc + idx + # 
verify auxiliary value is either
+                                            # numeric or a time series variable
+                                            if (
+                                                not datautil.DatumUtil.is_float(
+                                                    row[data_index]
+                                                )
+                                                and row[data_index]
+                                                not in time_series_name_dict
+                                            ):
+                                                desc = (
+                                                    "Invalid non-numeric value "
+                                                    f"'{row[data_index]}' "
+                                                    "in auxiliary data."
+                                                )
+                                                chk._add_to_summary(
+                                                    "Error",
+                                                    desc=desc,
+                                                    package=self.package_name,
+                                                )
+                    # else if stress period data is arrays
+                    elif dataset_type == "ndarray":
+                        # loop through auxiliary stress period datasets
+                        for data in aux_datasets:
+                            # verify auxiliary values are numeric; nan values
+                            # indicate missing time-array series data
+                            if isinstance(data, np.ndarray):
+                                if np.isnan(np.sum(data)):
+                                    desc = (
+                                        "One or more nan values were "
+                                        "found in auxiliary data."
+                                    )
+                                    chk._add_to_summary(
+                                        "Warning",
+                                        desc=desc,
+                                        package=self.package_name,
+                                    )
+        return chk
+
+    def _get_nan_exclusion_list(self):
+        excl_list = []
+        if hasattr(self, "stress_period_data"):
+            spd_struct = self.stress_period_data.structure
+            for item_struct in spd_struct.data_item_structures:
+                if item_struct.optional or item_struct.keystring_dict:
+                    excl_list.append(item_struct.name)
+        return excl_list
+
+    def _get_data_str(self, formal, show_data=True):
+        data_str = (
+            "package_name = {}\nfilename = {}\npackage_type = {}"
+            "\nmodel_or_simulation_package = {}"
+            "\n{}_name = {}"
+            "\n".format(
+                self._get_pname(),
+                self._filename,
+                self.package_type,
+                self.model_or_sim.type.lower(),
+                self.model_or_sim.type.lower(),
+                self.model_or_sim.name,
+            )
+        )
+        if self.parent_file is not None and formal:
+            data_str = f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n"
+        else:
+            data_str = f"{data_str}\n"
+        if show_data:
+            for block in self.blocks.values():
+                if formal:
+                    bl_repr = repr(block)
+                    if len(bl_repr.strip()) > 0:
+                        data_str = "{}Block {}\n--------------------\n{}\n".format(
+                            data_str, block.structure.name, bl_repr
+                        )
+                else:
+                    bl_str = str(block)
+                    if len(bl_str.strip()) > 0:
+                        data_str = "{}Block {}\n--------------------\n{}\n".format(
+                            data_str, block.structure.name, bl_str
+                        )
+        return data_str
+
+    def _get_pname(self):
+        if self.package_name is not None:
+            return str(self.package_name)
+        else:
+            return str(self._filename)
+
+    def _get_block_header_info(self, line, path):
+        # init
+        header_variable_strs = []
+        arr_clean_line = line.strip().split()
+        # guard against a bare "BEGIN" line so the name lookup cannot raise
+        # an IndexError before the length check below reports the problem
+        block_name = arr_clean_line[1] if len(arr_clean_line) > 1 else ""
+        header_comment = MFComment(
+            "", path + (block_name,), self.simulation_data, 0
+        )
+        # break header into components
+        if len(arr_clean_line) < 2:
+            message = (
+                "Block header does not contain a name. 
Name " + 'expected in line "{}".'.format(line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "parsing block header", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + elif len(arr_clean_line) == 2: + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + else: + # process text after block name + comment = False + for entry in arr_clean_line[2:]: + # if start of comment + if MFComment.is_comment(entry.strip()[0]): + comment = True + if comment: + header_comment.text = " ".join([header_comment.text, entry]) + else: + header_variable_strs.append(entry) + return MFBlockHeader( + arr_clean_line[1], + header_variable_strs, + header_comment, + self.simulation_data, + path, + ) + + def _update_size_defs(self): + # build temporary data lookup by name + data_lookup = {} + for block in self.blocks.values(): + for dataset in block.datasets.values(): + data_lookup[dataset.structure.name] = dataset + + # loop through all data + for block in self.blocks.values(): + for dataset in block.datasets.values(): + # if data shape is 1-D + if dataset.structure.shape and len(dataset.structure.shape) == 1: + # if shape name is data in this package + if dataset.structure.shape[0] in data_lookup: + size_def = data_lookup[dataset.structure.shape[0]] + size_def_name = size_def.structure.name + + if isinstance(dataset, mfdata.MFTransient): + # for transient data always use the maximum size + new_size = -1 + for key in dataset.get_active_key_list(): + try: + data = dataset.get_data(key=key[0]) + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + data_len = len(data) + if data_len > new_size: + new_size = data_len + else: + # for all other data set max to size + new_size = -1 + try: + data = dataset.get_data() + except (OSError, MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + new_size = len(dataset.get_data()) + + if size_def.get_data() is None: + current_size = -1 + else: + current_size = size_def.get_data() + + if new_size > current_size: + # store current size + size_def.set_data(new_size) + + # informational message to the user + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print( + "INFORMATION: {} in {} changed to {} " + "based on size of {}".format( + size_def_name, + size_def.structure.path[:-1], + new_size, + dataset.structure.name, + ) + ) + + def inspect_cells(self, cell_list, stress_period=None): + """ + Inspect model cells. Returns package data associated with cells. + + Parameters + ---------- + cell_list : list of tuples + List of model cells. Each model cell is a tuple of integers. + ex: [(1,1,1), (2,4,3)] + stress_period : int + For transient data, only return data from this stress period. If + not specified or None, all stress period data will be returned. 
+ + Returns + ------- + output : array + Array containing inspection results + + """ + data_found = [] + + # loop through blocks + local_index_names = [] + local_index_blocks = [] + local_index_values = [] + local_index_cellids = [] + # loop through blocks in package + for block in self.blocks.values(): + # loop through data in block + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + # handle list data + cellid_column = None + local_index_name = None + # loop through list data column definitions + for index, data_item in enumerate( + dataset.structure.data_item_structures + ): + if index == 0 and data_item.type == DatumType.integer: + local_index_name = data_item.name + # look for cellid column in list data row + if isinstance(data_item, MFDataItemStructure) and ( + data_item.is_cellid or data_item.possible_cellid + ): + cellid_column = index + break + if cellid_column is not None: + data_output = DataSearchOutput(dataset.path) + local_index_vals = [] + local_index_cells = [] + # get data + if isinstance(dataset, mfdatalist.MFTransientList): + # data may be in multiple transient blocks, get + # data from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get data + main_data = {-1: dataset.get_data()} + + # loop through each dataset + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = value.dtype.names + # loop through list data rows + for line in value: + # loop through list of cells we are searching + # for + for cell in cell_list: + if isinstance( + line[cellid_column], tuple + ) and cellids_equal(line[cellid_column], cell): + # save data found + data_output.data_entries.append(line) + data_output.data_entry_ids.append(cell) + data_output.data_entry_stress_period.append(key) + if datautil.DatumUtil.is_int(line[0]): + # save index data for further + # processing. 
assuming index is + # always first entry + local_index_vals.append(line[0]) + local_index_cells.append(cell) + + if local_index_name is not None and len(local_index_vals) > 0: + # capture index lookups for scanning related data + local_index_names.append(local_index_name) + local_index_blocks.append(block.path[-1]) + local_index_values.append(local_index_vals) + local_index_cellids.append(local_index_cells) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + elif isinstance(dataset, mfdataarray.MFArray): + # handle array data + data_shape = copy.deepcopy( + dataset.structure.data_item_structures[0].shape + ) + if dataset.path[-1] == "top": + # top is a special case where the two datasets + # need to be combined to get the correct layer top + model_grid = self.model_or_sim.modelgrid + main_data = {-1: model_grid.top_botm} + data_shape.append("nlay") + else: + if isinstance(dataset, mfdataarray.MFTransientArray): + # data may be in multiple blocks, get data from + # appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block, get a process data + main_data = {-1: dataset.get_data()} + if main_data is None: + continue + data_output = DataSearchOutput(dataset.path) + # loop through datasets + for key, array_data in main_data.items(): + if array_data is None: + continue + self.model_or_sim.match_array_cells( + cell_list, data_shape, array_data, key, data_output + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + + if len(local_index_names) > 0: + # look for data that shares the index value with data found + # for example a shared well or reach number + for block in self.blocks.values(): + # loop through data + for dataset in block.datasets.values(): + if isinstance(dataset, mfdatalist.MFList): + data_item = dataset.structure.data_item_structures[0] + data_output = DataSearchOutput(dataset.path) + # loop through previous data found + for ( + local_index_name, + local_index_vals, + cell_ids, + local_block_name, + ) in zip( + local_index_names, + local_index_values, + local_index_cellids, + local_index_blocks, + ): + if local_block_name == block.path[-1]: + continue + if ( + isinstance(data_item, MFDataItemStructure) + and data_item.name == local_index_name + and data_item.type == DatumType.integer + ): + # matching data index type found, get data + if isinstance(dataset, mfdatalist.MFTransientList): + # data may be in multiple blocks, get data + # from appropriate blocks + main_data = dataset.get_data(stress_period) + if stress_period is not None: + main_data = {stress_period: main_data} + else: + # data is all in one block + main_data = {-1: dataset.get_data()} + # loop through the data + for key, value in main_data.items(): + if value is None: + continue + if data_output.data_header is None: + data_output.data_header = value.dtype.names + # loop through each row of data + for line in value: + # loop through the index values we are + # looking for + for index_val, cell_id in zip( + local_index_vals, cell_ids + ): + # try to match index values we are + # looking for to the data + if index_val == line[0]: + # save data found + data_output.data_entries.append(line) + data_output.data_entry_ids.append( + index_val + ) + data_output.data_entry_cellids.append( + cell_id + ) + data_output.data_entry_stress_period.append( + key + ) + if len(data_output.data_entries) > 0: + data_found.append(data_output) + return data_found + + def 
remove(self): + """Removes this package from the simulation/model it is currently a + part of. + """ + self.model_or_sim.remove_package(self) + + def build_child_packages_container(self, pkg_type, filerecord): + """Builds a container object for any child packages. This method is + only intended for FloPy internal use.""" + # get package class + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + # create child package object + child_pkgs_name = f"utl{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is None and self.model_or_sim.model_type is None: + # simulation level object, try just the package type in the name + child_pkgs_name = f"{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is None: + # see if the package is part of one of the supported model types + for model_type in MFStructure().sim_struct.model_types: + child_pkgs_name = f"{model_type}{pkg_type}packages" + child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") + if child_pkgs_obj is not None: + break + child_pkgs = child_pkgs_obj( + self.model_or_sim, self, pkg_type, filerecord, None, package_obj + ) + setattr(self, pkg_type, child_pkgs) + self._child_package_groups[pkg_type] = child_pkgs + + def _get_dfn_name_dict(self): + dfn_name_dict = {} + item_num = 0 + for item in self.structure.dfn_list: + if len(item) > 1: + item_name = item[1].split() + if len(item_name) > 1 and item_name[0] == "name": + dfn_name_dict[item_name[1]] = item_num + item_num += 1 + return dfn_name_dict + + def build_child_package(self, pkg_type, data, parameter_name, filerecord): + """Builds a child package. This method is only intended for FloPy + internal use.""" + if not hasattr(self, pkg_type): + self.build_child_packages_container(pkg_type, filerecord) + if data is not None: + package_group = getattr(self, pkg_type) + # build child package file name + child_path = package_group.next_default_file_path() + # create new empty child package + package_obj = PackageContainer.package_factory( + pkg_type, self.model_or_sim.model_type + ) + package = package_obj(self, filename=child_path, child_builder_call=True) + assert hasattr(package, parameter_name) + + if isinstance(data, dict): + # order data correctly + dfn_name_dict = package._get_dfn_name_dict() + ordered_data_items = [] + for key, value in data.items(): + if key in dfn_name_dict: + ordered_data_items.append([dfn_name_dict[key], key, value]) + else: + ordered_data_items.append([999999, key, value]) + ordered_data_items = sorted(ordered_data_items, key=lambda x: x[0]) + + # evaluate and add data to package + unused_data = {} + for order, key, value in ordered_data_items: + # if key is an attribute of the child package + if isinstance(key, str) and hasattr(package, key): + # set child package attribute + child_data_attr = getattr(package, key) + if isinstance(child_data_attr, mfdatalist.MFList): + child_data_attr.set_data(value, autofill=True) + elif isinstance(child_data_attr, mfdata.MFData): + child_data_attr.set_data(value) + elif key == "fname" or key == "filename": + child_path = value + package._filename = value + else: + setattr(package, key, value) + else: + unused_data[key] = value + if unused_data: + setattr(package, parameter_name, unused_data) + else: + setattr(package, parameter_name, data) + + # append package to list + package_group.init_package(package, child_path) + return package + + def 
build_mfdata(self, var_name, data=None): + """Returns the appropriate data type object (mfdatalist, mfdataarray, + or mfdatascalar) given that object the appropriate structure (looked + up based on var_name) and any data supplied. This method is for + internal FloPy library use only. + + Parameters + ---------- + var_name : str + Variable name + + data : many supported types + Data contained in this object + + Returns + ------- + data object : MFData subclass + + """ + if self.loading_package: + data = None + for key, block in self.structure.blocks.items(): + if var_name in block.data_structures: + if block.name not in self.blocks: + self.blocks[block.name] = MFBlock( + self.simulation_data, + self.dimensions, + block, + self.path + (key,), + self.model_or_sim, + self, + ) + dataset_struct = block.data_structures[var_name] + var_path = self.path + (key, var_name) + ds = self.blocks[block.name].add_dataset(dataset_struct, data, var_path) + self._data_list.append(ds) + return ds + + message = 'Unable to find variable "{}" in package "{}".'.format( + var_name, self.package_type + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "building data objects", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + def set_model_relative_path(self, model_ws): + """Sets the model path relative to the simulation's path. + + Parameters + ---------- + model_ws : str + Model path relative to the simulation's path. + + """ + # update blocks + for key, block in self.blocks.items(): + block.set_model_relative_path(model_ws) + # update sub-packages + for package in self._package_container.packagelist: + package.set_model_relative_path(model_ws) + + def set_all_data_external( + self, + check_data=True, + external_data_folder=None, + base_name=None, + binary=False, + ): + """Sets the package's list and array data to be stored externally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + external_data_folder + Folder where external data will be stored + base_name: str + Base file name prefix for all files + binary: bool + Whether file will be stored as binary + """ + # set blocks + for key, block in self.blocks.items(): + file_name = os.path.split(self.filename)[1] + if base_name is not None: + file_name = f"{base_name}_{file_name}" + block.set_all_data_external( + file_name, + check_data, + external_data_folder, + binary, + ) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_external( + check_data, + external_data_folder, + base_name, + binary, + ) + + def set_all_data_internal(self, check_data=True): + """Sets the package's list and array data to be stored internally. + + Parameters + ---------- + check_data : bool + Determine if data error checking is enabled + + """ + # set blocks + for key, block in self.blocks.items(): + block.set_all_data_internal(check_data) + # set sub-packages + for package in self._package_container.packagelist: + package.set_all_data_internal(check_data) + + def load(self, strict=True): + """Loads the package from file. + + Parameters + ---------- + strict : bool + Enforce strict checking of data. 
+
+        Returns
+        -------
+        success : bool
+
+        """
+        # open file
+        try:
+            fd_input_file = open(datautil.clean_filename(self.get_file_path()), "r")
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                message = "File {} of type {} could not be opened.".format(
+                    self.get_file_path(), self.package_type
+                )
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    self.model_name,
+                    self.package_name,
+                    self.path,
+                    "loading package file",
+                    None,
+                    inspect.stack()[0][3],
+                    type_,
+                    value_,
+                    traceback_,
+                    message,
+                    self.simulation_data.debug,
+                )
+            # re-raise any other I/O error so it is not silently swallowed
+            raise
+
+        try:
+            self._load_blocks(fd_input_file, strict)
+        except ReadAsArraysException as err:
+            fd_input_file.close()
+            raise ReadAsArraysException(err)
+        # close file
+        fd_input_file.close()
+
+        if self.simulation_data.auto_set_sizes:
+            self._update_size_defs()
+
+        # return validity of file
+        return self.is_valid()
+
+    def is_valid(self):
+        """Returns whether or not this package is valid.
+
+        Returns
+        -------
+        is valid : bool
+
+        """
+        # Check blocks
+        for block in self.blocks.values():
+            # Non-optional blocks must be enabled
+            if (
+                block.structure.number_non_optional_data() > 0
+                and not block.enabled
+                and block.is_allowed()
+            ):
+                self.last_error = (
+                    f'Required block "{block.block_header.name}" not enabled'
+                )
+                return False
+            # Enabled blocks must be valid
+            if block.enabled and not block.is_valid:
+                self.last_error = f'Invalid block "{block.block_header.name}"'
+                return False
+
+        return True
+
+    def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
+        # init
+        self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = MFComment(
+            "", self.path, self.simulation_data
+        )
+        self.post_block_comments = MFComment("", self.path, self.simulation_data)
+
+        blocks_read = 0
+        found_first_block = False
+        line = " "
+        while line != "":
+            line = fd_input_file.readline()
+            clean_line = line.strip()
+            # If comment or empty line
+            if MFComment.is_comment(clean_line, True):
+                self._store_comment(line, found_first_block)
+            elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN":
+                # parse block header
+                try:
+                    block_header_info = self._get_block_header_info(line, self.path)
+                except MFDataException as mfde:
+                    message = (
+                        "An error occurred while loading block header "
+                        'in line "{}".'.format(line)
+                    )
+                    type_, value_, traceback_ = sys.exc_info()
+                    raise MFDataException(
+                        self.model_name,
+                        self._get_pname(),
+                        self.path,
+                        "loading block header",
+                        None,
+                        inspect.stack()[0][3],
+                        type_,
+                        value_,
+                        traceback_,
+                        message,
+                        self.simulation_data.debug,
+                        mfde,
+                    )
+
+                # if there is more than one possible block with the same name,
+                # resolve the correct block to use
+                block_key = block_header_info.name.lower()
+                block_num = 1
+                possible_key = f"{block_header_info.name.lower()}-{block_num}"
+                if possible_key in self.blocks:
+                    block_key = possible_key
+                    block_header_name = block_header_info.name.lower()
+                    while (
+                        block_key in self.blocks
+                        and not self.blocks[block_key].is_allowed()
+                    ):
+                        block_key = f"{block_header_name}-{block_num}"
+                        block_num += 1
+
+                if block_key not in self.blocks:
+                    # block name not recognized, load block as comments and
+                    # issue a warning
+                    if (
+                        self.simulation_data.verbosity_level.value
+                        >= VerbosityLevel.normal.value
+                    ):
+                        warning_str = (
+                            'WARNING: Block "{}" is not a valid block '
+                            "name for file type "
+                            "{}.".format(block_key, self.package_type)
+                        )
+                        print(warning_str)
+                    self._store_comment(line, found_first_block)
+                    while line != "":
+                        line = fd_input_file.readline()
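+                        # keep storing the unrecognized block's lines as
+                        # comments until its END line is reached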
+ self._store_comment(line, found_first_block) + arr_line = datautil.PyListUtil.split_data_line(line) + if arr_line and ( + len(arr_line[0]) <= 2 or arr_line[0][:3].upper() == "END" + ): + break + else: + found_first_block = True + skip_block = False + cur_block = self.blocks[block_key] + if cur_block.loaded: + # Only blocks defined as repeating are allowed to have + # multiple entries + header_name = block_header_info.name + if not self.structure.blocks[header_name.lower()].repeating(): + # warn and skip block + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + warning_str = ( + 'WARNING: Block "{}" has ' + "multiple entries and is not " + "intended to be a repeating " + "block ({} package" + ")".format(header_name, self.package_type) + ) + print(warning_str) + skip_block = True + bhs = cur_block.structure.block_header_structure + bhval = block_header_info.variable_strings + if len(bhs) > 0 and len(bhval) > 0 and bhs[0].name == "iper": + nper = self.simulation_data.mfdata[ + ("tdis", "dimensions", "nper") + ].get_data() + bhval_int = datautil.DatumUtil.is_int(bhval[0]) + if not bhval_int or int(bhval[0]) > nper: + # skip block when block stress period is greater + # than nper + skip_block = True + + if not skip_block: + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" loading block {cur_block.structure.name}...") + # reset comments + self.post_block_comments = MFComment( + "", self.path, self.simulation_data + ) + + cur_block.load(block_header_info, fd_input_file, strict) + + # write post block comment + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + blocks_read += 1 + if blocks_read >= max_blocks: + break + else: + # treat skipped block as if it is all comments + arr_line = datautil.PyListUtil.split_data_line(clean_line) + self.post_block_comments.add_text(str(line), True) + while arr_line and ( + len(line) <= 2 or arr_line[0][:3].upper() != "END" + ): + line = fd_input_file.readline() + arr_line = datautil.PyListUtil.split_data_line(line.strip()) + if arr_line: + self.post_block_comments.add_text(str(line), True) + self.simulation_data.mfdata[ + cur_block.block_headers[-1].blk_post_comment_path + ] = self.post_block_comments + + else: + if not ( + len(clean_line) == 0 + or (len(line) > 2 and line[:3].upper() == "END") + ): + # Record file location of beginning of unresolved text + # treat unresolved text as a comment for now + self._store_comment(line, found_first_block) + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """Writes the package to a file. + + Parameters + ---------- + ext_file_action : ExtFileAction + How to handle pathing of external data files. 
+ """ + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # create any folders in path + package_file_path = self.get_file_path() + package_folder = os.path.split(package_file_path)[0] + if package_folder and not os.path.isdir(package_folder): + os.makedirs(os.path.split(package_file_path)[0]) + + # open file + fd = open(package_file_path, "w") + + # write flopy header + if self.simulation_data.write_headers: + dt = datetime.datetime.now() + header = "# File generated by Flopy version {} on {} at {}.\n".format( + __version__, + dt.strftime("%m/%d/%Y"), + dt.strftime("%H:%M:%S"), + ) + fd.write(header) + + # write blocks + self._write_blocks(fd, ext_file_action) + + fd.close() + + def create_package_dimensions(self): + """Creates a package dimensions object. For internal FloPy library + use. + + Returns + ------- + package dimensions : PackageDimensions + + """ + model_dims = None + if self.container_type[0] == PackageContainerType.model: + model_dims = [ + modeldimensions.ModelDimensions(self.path[0], self.simulation_data) + ] + else: + # this is a simulation file that does not correspond to a specific + # model. figure out which model to use and return a dimensions + # object for that model + if self.dfn_file_name[0:3] == "exg": + exchange_rec_array = self.simulation_data.mfdata[ + ("nam", "exchanges", "exchanges") + ].get_data() + if exchange_rec_array is None: + return None + for exchange in exchange_rec_array: + if exchange[1].lower() == self._filename.lower(): + model_dims = [ + modeldimensions.ModelDimensions( + exchange[2], self.simulation_data + ), + modeldimensions.ModelDimensions( + exchange[3], self.simulation_data + ), + ] + break + elif ( + self.dfn_file_name[4:7] == "gnc" + and self.model_or_sim.type == "Simulation" + ): + # get exchange file name associated with gnc package + if self.parent_file is not None: + exg_file_name = self.parent_file.filename + else: + raise Exception( + "Can not create a simulation-level " + "gnc file without a corresponding " + "exchange file. Exchange file must be " + "created first." + ) + # get models associated with exchange file from sim nam file + try: + exchange_recarray_data = ( + self.model_or_sim.name_file.exchanges.get_data() + ) + except MFDataException as mfde: + message = ( + "An error occurred while retrieving exchange " + "data from the simulation name file. The error " + "occurred while processing gnc file " + f'"{self.filename}".' 
+ ) + raise MFDataException( + mfdata_except=mfde, + package=self._get_pname(), + message=message, + ) + assert exchange_recarray_data is not None + model_1 = None + model_2 = None + for exchange in exchange_recarray_data: + if exchange[1] == exg_file_name: + model_1 = exchange[2] + model_2 = exchange[3] + + # assign models to gnc package + model_dims = [ + modeldimensions.ModelDimensions(model_1, self.simulation_data), + modeldimensions.ModelDimensions(model_2, self.simulation_data), + ] + elif self.parent_file is not None: + model_dims = [] + for md in self.parent_file.dimensions.model_dim: + model_name = md.model_name + model_dims.append( + modeldimensions.ModelDimensions( + model_name, self.simulation_data + ) + ) + else: + model_dims = [ + modeldimensions.ModelDimensions(None, self.simulation_data) + ] + return modeldimensions.PackageDimensions(model_dims, self.structure, self.path) + + def _store_comment(self, line, found_first_block): + # Store comment + if found_first_block: + self.post_block_comments.text += line + else: + self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)].text += line + + def _write_blocks(self, fd, ext_file_action): + # verify that all blocks are valid + if not self.is_valid(): + message = ( + 'Unable to write out model file "{}" due to the ' + "following error: " + "{} ({})".format(self._filename, self.last_error, self.path) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.model_name, + self._get_pname(), + self.path, + "writing package blocks", + None, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self.simulation_data.debug, + ) + + # write initial comments + pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) + if pkg_hdr_comments_path in self.simulation_data.mfdata: + self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)].write( + fd, False + ) + + # loop through blocks + block_num = 1 + for block in self.blocks.values(): + if ( + self.simulation_data.verbosity_level.value + >= VerbosityLevel.verbose.value + ): + print(f" writing block {block.structure.name}...") + # write block + block.write(fd, ext_file_action=ext_file_action) + block_num += 1 + + def get_file_path(self): + """Returns the package file's path. + + Returns + ------- + file path : str + """ + if self.path[0] in self.simulation_data.mfpath.model_relative_path: + return os.path.join( + self.simulation_data.mfpath.get_model_path(self.path[0]), + self._filename, + ) + else: + return os.path.join( + self.simulation_data.mfpath.get_sim_path(), self._filename + ) + + def export(self, f, **kwargs): + """ + Method to export a package to netcdf or shapefile based on the + extension of the file name (.shp for shapefile, .nc for netcdf) + + Parameters + ---------- + f : str + Filename + kwargs : keyword arguments + modelgrid : flopy.discretization.Grid instance + User supplied modelgrid which can be used for exporting + in lieu of the modelgrid associated with the model object + + Returns + ------- + None or Netcdf object + + """ + from .. import export + + return export.utils.package_export(f, self, **kwargs) + + def plot(self, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate + file names for output image files. Plots will be exported as + image files if file_name_base is not None. 
(default is None)
+            file_extension : str
+                Valid matplotlib.pyplot file extension for savefig(). Only
+                used if filename_base is not None. (default is 'png')
+            mflay : int
+                MODFLOW zero-based layer number to return. If None, then
+                all layers will be included. (default is None)
+            kper : int
+                MODFLOW zero-based stress period number to return. (default
+                is zero)
+            key : str
+                MfList dictionary key. (default is None)
+
+        Returns
+        -------
+        axes : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis objects is returned.
+
+        """
+        from ..plot.plotutil import PlotUtilities
+
+        if not self.plottable:
+            raise TypeError("Simulation level packages are not plottable")
+
+        axes = PlotUtilities._plot_package_helper(self, **kwargs)
+        return axes
+
+
+class MFChildPackages:
+    """
+    Behind the scenes code for creating an interface to access child packages
+    from a parent package. This class is automatically constructed by the
+    FloPy library and is for internal library use only.
+
+    Parameters
+    ----------
+    model_or_sim : MFModel or MFSimulation
+        Model or simulation that owns the parent package
+    parent : MFPackage
+        Parent package the child packages are attached to
+    pkg_type : str
+        Type of the child packages
+    filerecord : MFData
+        File record variable in the parent package listing the child
+        package files
+    package : MFPackage
+        An existing child package to attach
+    package_class : type
+        Class of the child packages
+    """
+
+    def __init__(
+        self,
+        model_or_sim,
+        parent,
+        pkg_type,
+        filerecord,
+        package=None,
+        package_class=None,
+    ):
+        self._packages = []
+        self._filerecord = filerecord
+        if package is not None:
+            self._packages.append(package)
+        self._model_or_sim = model_or_sim
+        self._cpparent = parent
+        self._pkg_type = pkg_type
+        self._package_class = package_class
+
+    def __init_subclass__(cls):
+        """Register package"""
+        super().__init_subclass__()
+        PackageContainer.packages_by_abbr[cls.package_abbr] = cls
+
+    def __getattr__(self, attr):
+        if (
+            "_packages" in self.__dict__
+            and len(self._packages) > 0
+            and hasattr(self._packages[0], attr)
+        ):
+            item = getattr(self._packages[0], attr)
+            return item
+        raise AttributeError(attr)
+
+    def __getitem__(self, k):
+        if isinstance(k, int):
+            if k < len(self._packages):
+                return self._packages[k]
+        raise ValueError(f"Package index {k} does not exist.")
+
+    def __setattr__(self, key, value):
+        if key not in (
+            "_packages",
+            "_model_or_sim",
+            "_cpparent",
+            "_inattr",
+            "_filerecord",
+            "_package_class",
+            "_pkg_type",
+        ):
+            if len(self._packages) == 0:
+                raise Exception(
+                    "No {} package is currently attached to package"
+                    " {}. 
Use the initialize method to create a(n) "
+                    "{} package before attempting to access its "
+                    "properties.".format(
+                        self._pkg_type, self._cpparent.filename, self._pkg_type
+                    )
+                )
+            package = self._packages[0]
+            setattr(package, key, value)
+            return
+        super().__setattr__(key, value)
+
+    def __default_file_path_base(self, file_path, suffix=""):
+        stem = os.path.split(file_path)[1]
+        stem_lst = stem.split(".")
+        file_name = ".".join(stem_lst[:-1])
+        if len(stem_lst) > 1:
+            file_ext = stem_lst[-1]
+            return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}"
+        elif suffix != "":
+            # insert the suffix ahead of the package-type extension
+            return f"{stem}.{suffix}.{self._pkg_type}"
+        else:
+            return f"{stem}.{self._pkg_type}"
+
+    def __file_path_taken(self, possible_path):
+        for package in self._packages:
+            # Do case insensitive compare
+            if package.filename.lower() == possible_path.lower():
+                return True
+        return False
+
+    def next_default_file_path(self):
+        possible_path = self.__default_file_path_base(self._cpparent.filename)
+        suffix = 0
+        while self.__file_path_taken(possible_path):
+            possible_path = self.__default_file_path_base(
+                self._cpparent.filename, suffix
+            )
+            suffix += 1
+        return possible_path
+
+    def init_package(self, package, fname, remove_packages=True):
+        if remove_packages:
+            # clear out existing packages
+            self._remove_packages()
+        elif fname is not None:
+            self._remove_packages(fname)
+        if fname is None:
+            # build a file name
+            fname = self.next_default_file_path()
+        package._filename = fname
+        # check file record variable
+        found = False
+        fr_data = self._filerecord.get_data()
+        if fr_data is not None:
+            for line in fr_data:
+                if line[0] == fname:
+                    found = True
+        if not found:
+            # append file record variable
+            self._filerecord.append_data([(fname,)])
+        # add the package to the list
+        self._packages.append(package)
+
+    def _update_filename(self, old_fname, new_fname):
+        file_record = self._filerecord.get_data()
+        new_file_record_data = []
+        if file_record is not None:
+            file_record_data = file_record[0]
+            for item in file_record_data:
+                base, fname = os.path.split(item)
+                if fname.lower() == old_fname.lower():
+                    if base:
+                        new_file_record_data.append((os.path.join(base, new_fname),))
+                    else:
+                        new_file_record_data.append((new_fname,))
+                else:
+                    new_file_record_data.append((item,))
+        else:
+            new_file_record_data.append((new_fname,))
+        self._filerecord.set_data(new_file_record_data)
+
+    def _append_package(self, package, fname, update_frecord=True):
+        if fname is None:
+            # build a file name
+            fname = self.next_default_file_path()
+        package._filename = fname
+
+        if update_frecord:
+            # set file record variable
+            file_record = self._filerecord.get_data()
+            file_record_data = file_record
+            new_file_record_data = []
+            for item in file_record_data:
+                new_file_record_data.append((item[0],))
+            new_file_record_data.append((fname,))
+            self._filerecord.set_data(new_file_record_data)
+
+        for existing_pkg in self._packages:
+            if existing_pkg is package:
+                # do not add the same package twice
+                return
+        # add the package to the list
+        self._packages.append(package)
+
+    def _remove_packages(self, fname=None, only_pop_from_list=False):
+        rp_list = []
+        for idx, package in enumerate(self._packages):
+            if fname is None or package.filename == fname:
+                if not only_pop_from_list:
+                    self._model_or_sim.remove_package(package)
+                rp_list.append(idx)
+        for idx in reversed(rp_list):
+            self._packages.pop(idx)

From 131b005bb36582617c2970cabb1ed2672ae14e55 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Tue, 12 Aug 2025 16:47:49 -0400
Subject: [PATCH 10/44] remove 
unintended files --- flopy/mf6/tmp/2/mfpackage.py | 3801 ----------------------------- flopy/mf6/tmp/mfmodel.py | 2229 ----------------- flopy/mf6/tmp/mfpackage.py | 3666 ---------------------------- flopy/mf6/tmp/mfstructure.py | 2113 ---------------- flopy/mf6/tmp/ruff/2/mfmodel.py | 2256 ----------------- flopy/mf6/tmp/ruff/2/mfpackage.py | 3720 ---------------------------- flopy/mf6/tmp/ruff/mfmodel.py | 2143 ---------------- flopy/mf6/tmp/ruff/mfpackage.py | 3497 -------------------------- 8 files changed, 23425 deletions(-) delete mode 100644 flopy/mf6/tmp/2/mfpackage.py delete mode 100644 flopy/mf6/tmp/mfmodel.py delete mode 100644 flopy/mf6/tmp/mfpackage.py delete mode 100644 flopy/mf6/tmp/mfstructure.py delete mode 100644 flopy/mf6/tmp/ruff/2/mfmodel.py delete mode 100644 flopy/mf6/tmp/ruff/2/mfpackage.py delete mode 100644 flopy/mf6/tmp/ruff/mfmodel.py delete mode 100644 flopy/mf6/tmp/ruff/mfpackage.py diff --git a/flopy/mf6/tmp/2/mfpackage.py b/flopy/mf6/tmp/2/mfpackage.py deleted file mode 100644 index d20612f27a..0000000000 --- a/flopy/mf6/tmp/2/mfpackage.py +++ /dev/null @@ -1,3801 +0,0 @@ -import copy -import datetime -import errno -import inspect -import os -import sys -import warnings - -import numpy as np - -from ..mbase import ModelInterface -from ..pakbase import PackageInterface -from ..utils import datautil -from ..utils.check import mf6check -from ..version import __version__ -from .coordinates import modeldimensions -from .data import ( - mfdata, - mfdataarray, - mfdatalist, - mfdataplist, - mfdatascalar, - mfstructure, -) -from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal -from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - MFInvalidTransientBlockHeaderException, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .utils.output_util import MF6Output - - -class MFBlockHeader: - """ - Represents the header of a block in a MF6 input file. This class is used - internally by FloPy and its direct use by a user of this library is not - recommend. - - Parameters - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - - Attributes - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - data_items : list - List of MFVariable of the variables contained in this block - - """ - - def __init__( - self, - name, - variable_strings, - comment, - simulation_data=None, - path=None, - block=None, - ): - self.name = name - self.variable_strings = variable_strings - self.block = block - if not ( - (simulation_data is None and path is None) - or (simulation_data is not None and path is not None) - ): - raise FlopyException( - "Block header must be initialized with both " - "simulation_data and path or with neither." 
- ) - if simulation_data is None: - self.comment = comment - self.simulation_data = None - self.path = path - self.comment_path = None - else: - self.connect_to_dict(simulation_data, path, comment) - # TODO: Get data_items from dictionary - self.data_items = [] - # build block comment paths - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - if isinstance(path, list): - path = tuple(path) - if path is not None: - self.blk_trailing_comment_path = path + ( - name, - "blk_trailing_comment", - ) - self.blk_post_comment_path = path + ( - name, - "blk_post_comment", - ) - if self.blk_trailing_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_trailing_comment_path] = ( - MFComment("", "", simulation_data, 0) - ) - if self.blk_post_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_post_comment_path] = MFComment( - "\n", "", simulation_data, 0 - ) - else: - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - - def __lt__(self, other): - transient_key = self.get_transient_key() - if transient_key is None: - return True - else: - other_key = other.get_transient_key() - if other_key is None: - return False - else: - return transient_key < other_key - - def build_header_variables( - self, - simulation_data, - block_header_structure, - block_path, - data, - dimensions, - ): - """Builds data objects to hold header variables.""" - self.data_items = [] - var_path = block_path + (block_header_structure[0].name,) - - # fix up data - fixed_data = [] - if ( - block_header_structure[0].data_item_structures[0].type - == DatumType.keyword - ): - data_item = block_header_structure[0].data_item_structures[0] - fixed_data.append(data_item.name) - if isinstance(data, tuple): - data = list(data) - if isinstance(data, list): - fixed_data = fixed_data + data - else: - fixed_data.append(data) - if len(fixed_data) > 0: - fixed_data = [tuple(fixed_data)] - # create data object - new_data = self.block.data_factory( - simulation_data, - None, - block_header_structure[0], - True, - var_path, - dimensions, - fixed_data, - ) - - self.add_data_item(new_data, data) - - def add_data_item(self, new_data, data): - """Adds data to the block.""" - self.data_items.append(new_data) - while isinstance(data, list): - if len(data) > 0: - data = data[0] - else: - data = None - if not isinstance(data, tuple): - data = (data,) - self.blk_trailing_comment_path += data - self.blk_post_comment_path += data - - def is_same_header(self, block_header): - """Checks if `block_header` is the same header as this header.""" - if len(self.variable_strings) > 0: - if len(self.variable_strings) != len( - block_header.variable_strings - ): - return False - else: - for sitem, oitem in zip( - self.variable_strings, block_header.variable_strings - ): - if sitem != oitem: - return False - return True - elif ( - len(self.data_items) > 0 and len(block_header.variable_strings) > 0 - ): - typ_obj = ( - self.data_items[0].structure.data_item_structures[0].type_obj - ) - if typ_obj == int or typ_obj == float: - return bool( - self.variable_strings[0] - == block_header.variable_strings[0] - ) - else: - return True - elif len(self.data_items) == len(block_header.variable_strings): - return True - return False - - def get_comment(self): - """Get block header comment""" - if self.simulation_data is None: - return self.comment - else: - return self.simulation_data.mfdata[self.comment_path] - - 
def connect_to_dict(self, simulation_data, path, comment=None): - """Add comment to the simulation dictionary""" - self.simulation_data = simulation_data - self.path = path - self.comment_path = path + ("blk_hdr_comment",) - if comment is None: - simulation_data.mfdata[self.comment_path] = self.comment - else: - simulation_data.mfdata[self.comment_path] = comment - self.comment = None - - def write_header(self, fd): - """Writes block header to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block header to. - - """ - fd.write(f"BEGIN {self.name}") - if len(self.data_items) > 0: - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - one_based = ( - self.data_items[0].structure.type == DatumType.integer - ) - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - if len(self.data_items) > 1: - for data_item in self.data_items[1:]: - entry = data_item.get_file_entry(values_only=True) - fd.write(str(entry).rstrip()) - if self.get_comment().text: - fd.write(" ") - self.get_comment().write(fd) - fd.write("\n") - - def write_footer(self, fd): - """Writes block footer to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block footer to. - - """ - fd.write(f"END {self.name}") - if len(self.data_items) > 0: - one_based = self.data_items[0].structure.type == DatumType.integer - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - fd.write("\n") - - def get_transient_key(self, data_path=None): - """Get transient key associated with this block header.""" - transient_key = None - for index in range(0, len(self.data_items)): - if self.data_items[index].structure.type != DatumType.keyword: - if data_path == self.data_items[index].path: - # avoid infinite recursion - return True - transient_key = self.data_items[index].get_data() - if isinstance(transient_key, np.recarray): - item_struct = self.data_items[index].structure - key_index = item_struct.first_non_keyword_index() - if not ( - key_index is not None - and len(transient_key[0]) > key_index - ): - if key_index is None: - raise FlopyException( - "Block header index could " - "not be determined." - ) - else: - raise FlopyException( - 'Block header index "{}" ' - 'must be less than "{}"' - ".".format(key_index, len(transient_key[0])) - ) - transient_key = transient_key[0][key_index] - break - return transient_key - - -class MFBlock: - """ - Represents a block in a MF6 input file. This class is used internally - by FloPy and use by users of the FloPy library is not recommended. 
- - Parameters - ---------- - simulation_data : MFSimulationData - Data specific to this simulation - dimensions : MFDimensions - Describes model dimensions including model grid and simulation time - structure : MFVariableStructure - Structure describing block - path : tuple - Unique path to block - - Attributes - ---------- - block_headers : MFBlockHeader - Block header text (BEGIN/END), header variables, comments in the - header - structure : MFBlockStructure - Structure describing block - path : tuple - Unique path to block - datasets : OrderDict - Dictionary of dataset objects with keys that are the name of the - dataset - datasets_keyword : dict - Dictionary of dataset objects with keys that are key words to identify - start of dataset - enabled : bool - If block is being used in the simulation - - """ - - def __init__( - self, - simulation_data, - dimensions, - structure, - path, - model_or_sim, - container_package, - ): - self._simulation_data = simulation_data - self._dimensions = dimensions - self._model_or_sim = model_or_sim - self._container_package = container_package - self.block_headers = [ - MFBlockHeader( - structure.name, - [], - MFComment("", path, simulation_data, 0), - simulation_data, - path, - self, - ) - ] - self.structure = structure - self.path = path - self.datasets = {} - self.datasets_keyword = {} - # initially disable if optional - self.enabled = structure.number_non_optional_data() > 0 - self.loaded = False - self.external_file_name = None - self._structure_init() - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - data_str = "" - for dataset in self.datasets.values(): - if formal: - ds_repr = repr(dataset) - if len(ds_repr.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!r}\n" - ) - else: - ds_str = str(dataset) - if len(ds_str.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!s}\n" - ) - return data_str - - # return an MFScalar, MFList, or MFArray - def data_factory( - self, - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - data=None, - package=None, - ): - """Creates the appropriate data child object derived from MFData.""" - data_type = structure.get_datatype() - # examine the data structure and determine the data type - if ( - data_type == mfstructure.DataType.scalar_keyword - or data_type == mfstructure.DataType.scalar - ): - return mfdatascalar.MFScalar( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - ) - elif ( - data_type == mfstructure.DataType.scalar_keyword_transient - or data_type == mfstructure.DataType.scalar_transient - ): - trans_scalar = mfdatascalar.MFScalarTransient( - sim_data, model_or_sim, structure, enable, path, dimensions - ) - if data is not None: - trans_scalar.set_data(data, key=0) - return trans_scalar - elif data_type == mfstructure.DataType.array: - return mfdataarray.MFArray( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - self, - ) - elif data_type == mfstructure.DataType.array_transient: - trans_array = mfdataarray.MFTransientArray( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - self, - ) - if data is not None: - trans_array.set_data(data, key=0) - return trans_array - elif data_type == mfstructure.DataType.list: - if ( - structure.basic_item - and self._container_package.package_type.lower() != "nam" - and self._simulation_data.use_pandas - ): - 
return mfdataplist.MFPandasList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - else: - return mfdatalist.MFList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - elif data_type == mfstructure.DataType.list_transient: - if structure.basic_item and self._simulation_data.use_pandas: - trans_list = mfdataplist.MFPandasTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - else: - trans_list = mfdatalist.MFTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - trans_list.set_data(data, key=0, autofill=True) - return trans_list - elif data_type == mfstructure.DataType.list_multiple: - mult_list = mfdatalist.MFMultipleList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - mult_list.set_data(data, key=0, autofill=True) - return mult_list - - def _structure_init(self): - # load datasets keywords into dictionary - for dataset_struct in self.structure.data_structures.values(): - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - # load block header data items into dictionary - for dataset in self.structure.block_header_structure: - self._new_dataset(dataset.name, dataset, True, None) - - def set_model_relative_path(self, model_ws): - """Sets `model_ws` as the model path relative to the simulation's - path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. - """ - # update datasets - for key, dataset in self.datasets.items(): - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_name = os.path.split(file_line[0])[1] - file_line[0] = os.path.join(model_ws, old_file_name) - # update block headers - for block_header in self.block_headers: - for dataset in block_header.data_items: - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_path, old_file_name = os.path.split( - file_line[1] - ) - new_file_path = os.path.join( - model_ws, old_file_name - ) - # update transient keys of datasets within the - # block - for key, idataset in self.datasets.items(): - if isinstance(idataset, mfdata.MFTransient): - idataset.update_transient_key( - file_line[1], new_file_path - ) - file_line[1] = os.path.join( - model_ws, old_file_name - ) - - def add_dataset(self, dataset_struct, data, var_path): - """Add data to this block.""" - try: - self.datasets[var_path[-1]] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - var_path, - self._dimensions, - data, - 
self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - - self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] - dtype = dataset_struct.get_datatype() - if ( - dtype == mfstructure.DataType.list_transient - or dtype == mfstructure.DataType.list_multiple - or dtype == mfstructure.DataType.array_transient - ): - # build repeating block header(s) - if isinstance(data, dict): - # Add block headers for each dictionary key - for index in data: - if isinstance(index, tuple): - header_list = list(index) - else: - header_list = [index] - self._build_repeating_header(header_list) - elif isinstance(data, list): - # Add a single block header of value 0 - self._build_repeating_header([0]) - elif ( - dtype != mfstructure.DataType.list_multiple - and data is not None - ): - self._build_repeating_header([[0]]) - - return self.datasets[var_path[-1]] - - def _build_repeating_header(self, header_data): - if self.header_exists(header_data[0]): - return - if ( - len(self.block_headers[-1].data_items) == 1 - and self.block_headers[-1].data_items[0].get_data() is not None - ): - block_header_path = self.path + (len(self.block_headers) + 1,) - block_header = MFBlockHeader( - self.structure.name, - [], - MFComment("", self.path, self._simulation_data, 0), - self._simulation_data, - block_header_path, - self, - ) - self.block_headers.append(block_header) - else: - block_header_path = self.path + (len(self.block_headers),) - - struct = self.structure - last_header = self.block_headers[-1] - try: - last_header.build_header_variables( - self._simulation_data, - struct.block_header_structure, - block_header_path, - header_data, - self._dimensions, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while building" - " block header variables for block " - '"{}"'.format(last_header.name), - ) - - def _new_dataset( - self, key, dataset_struct, block_header=False, initial_val=None - ): - dataset_path = self.path + (key,) - if block_header: - if ( - dataset_struct.type == DatumType.integer - and initial_val is not None - and len(initial_val) >= 1 - and dataset_struct.get_record_size()[0] == 1 - ): - # stress periods are stored 0 based - initial_val = int(initial_val[0]) - 1 - if isinstance(initial_val, list): - initial_val_path = tuple(initial_val) - initial_val = [tuple(initial_val)] - else: - initial_val_path = initial_val - try: - new_data = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - self.block_headers[-1].add_data_item(new_data, initial_val_path) - - else: - try: - self.datasets[key] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - 
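# _new_dataset above converts the one-based stress period number parsed from
# a PERIOD block header into the zero-based key used internally
# (initial_val = int(initial_val[0]) - 1). A small sketch of that convention:
def period_key(header_tokens):
    # header_tokens would be the parsed values from e.g. "BEGIN PERIOD 12"
    return int(header_tokens[0]) - 1

assert period_key(["1"]) == 0  # first stress period maps to key 0
assert period_key(["12"]) == 11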
)
-            except MFDataException as mfde:
-                raise MFDataException(
-                    mfdata_except=mfde,
-                    model=self._container_package.model_name,
-                    package=self._container_package._get_pname(),
-                    message="Error occurred while adding"
-                    ' dataset "{}" to block '
-                    '"{}"'.format(dataset_struct.name, self.structure.name),
-                )
-            for keyword in dataset_struct.get_keywords():
-                self.datasets_keyword[keyword] = dataset_struct
-
-    def is_empty(self):
-        """Returns true if this block is empty."""
-        for key, dataset in self.datasets.items():
-            try:
-                has_data = dataset.has_data()
-            except MFDataException as mfde:
-                raise MFDataException(
-                    mfdata_except=mfde,
-                    model=self._container_package.model_name,
-                    package=self._container_package._get_pname(),
-                    message="Error occurred while verifying"
-                    ' data of dataset "{}" in block '
-                    '"{}"'.format(dataset.structure.name, self.structure.name),
-                )
-
-            if has_data is not None and has_data:
-                return False
-        return True
-
-    def load(self, block_header, fd, strict=True):
-        """Loads block from file object. File object must be advanced to
-        beginning of block before calling.
-
-        Parameters
-        ----------
-        block_header : MFBlockHeader
-            Block header for block being loaded.
-        fd : file
-            File descriptor of file being loaded
-        strict : bool
-            Enforce strict MODFLOW 6 file format.
-        """
-        # verify number of header variables
-        if (
-            len(block_header.variable_strings)
-            < self.structure.number_non_optional_block_header_data()
-        ):
-            if (
-                self._simulation_data.verbosity_level.value
-                >= VerbosityLevel.normal.value
-            ):
-                warning_str = (
-                    'WARNING: Block header for block "{}" does not '
-                    "contain the correct number of "
-                    "variables {}".format(block_header.name, self.path)
-                )
-                print(warning_str)
-            return
-
-        if self.loaded:
-            # verify header has not already been loaded
-            for bh_current in self.block_headers:
-                if bh_current.is_same_header(block_header):
-                    if (
-                        self._simulation_data.verbosity_level.value
-                        >= VerbosityLevel.normal.value
-                    ):
-                        warning_str = (
-                            'WARNING: Block header for block "{}" is '
-                            "not a unique block header "
-                            "{}".format(block_header.name, self.path)
-                        )
-                        print(warning_str)
-                    return
-
-        # init
-        self.enabled = True
-        if not self.loaded:
-            self.block_headers = []
-        block_header.block = self
-        self.block_headers.append(block_header)
-
-        # process any header variable
-        if len(self.structure.block_header_structure) > 0:
-            dataset = self.structure.block_header_structure[0]
-            self._new_dataset(
-                dataset.name,
-                dataset,
-                True,
-                self.block_headers[-1].variable_strings,
-            )
-
-        # handle special readasarrays case
-        if (
-            self._container_package.structure.read_as_arrays
-            or (
-                hasattr(self._container_package, "aux")
-                and self._container_package.aux.structure.layered
-            )
-        ):
-            # auxiliary variables may appear with aux variable name as keyword
-            aux_vars = self._container_package.auxiliary.get_data()
-            if aux_vars is not None:
-                for var_name in list(aux_vars[0])[1:]:
-                    self.datasets_keyword[(var_name,)] = (
-                        self._container_package.aux.structure
-                    )
-
-        comments = []
-
-        # capture any initial comments
-        initial_comment = MFComment("", "", 0)
-        fd_block = fd
-        line = fd_block.readline()
-        datautil.PyListUtil.reset_delimiter_used()
-        arr_line = datautil.PyListUtil.split_data_line(line)
-        post_data_comments = MFComment("", "", self._simulation_data, 0)
-        while MFComment.is_comment(line, True):
-            initial_comment.add_text(line)
-            line = fd_block.readline()
-            arr_line = datautil.PyListUtil.split_data_line(line)
-
-        # if block not empty
-        external_file_info
= None - if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"): - if arr_line[0].lower() == "open/close": - # open block contents from external file - fd_block.readline() - root_path = self._simulation_data.mfpath.get_sim_path() - try: - file_name = os.path.split(arr_line[1])[-1] - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f' opening external file "{file_name}"...' - ) - external_file_info = arr_line - except: - type_, value_, traceback_ = sys.exc_info() - message = f'Error reading external file specified in line "{line}"' - raise MFDataException( - self._container_package.model_name, - self._container_package._get_pname(), - self.path, - "reading external file", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ) - if len(self.structure.data_structures) <= 1: - # load a single data set - dataset = self.datasets[next(iter(self.datasets))] - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading data {dataset.structure.name}..." - ) - next_line = dataset.load( - line, - fd_block, - self.block_headers[-1], - initial_comment, - external_file_info, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format( - dataset.structure.name, - self.structure.name, - fd_block.name, - ), - ) - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
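# load() above recognizes an OPEN/CLOSE record and pulls the block contents
# from an external file. A minimal sketch of that detection, assuming simple
# whitespace splitting (the real reader also handles quoting and comments):
import os

def external_file_from_line(line):
    tokens = line.split()
    if tokens and tokens[0].lower() == "open/close":
        return os.path.split(tokens[1])[-1]
    return None

assert (
    external_file_from_line("OPEN/CLOSE  mymodel.chd_period_1.txt")
    == "mymodel.chd_period_1.txt"
)
assert external_file_from_line("1 1 1  100.0") is None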
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - - if next_line[1] is not None: - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - else: - arr_line = "" - # capture any trailing comments - dataset.post_data_comments = post_data_comments - while arr_line and ( - len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" - ): - next_line[1] = fd_block.readline().strip() - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - if arr_line and ( - len(next_line[1]) <= 2 - or arr_line[0][:3].upper() != "END" - ): - post_data_comments.add_text(" ".join(arr_line)) - else: - # look for keyword and store line as data or comment - try: - key, results = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - except MFInvalidTransientBlockHeaderException as e: - warning_str = f"WARNING: {e}" - print(warning_str) - self.block_headers.pop() - return - - self._save_comments(arr_line, line, key, comments) - if results[1] is None or results[1][:3].upper() != "END": - # block consists of unordered datasets - # load the data sets out of order based on - # initial constants - line = " " - while line != "": - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line: - # determine if at end of block - if ( - len(arr_line[0]) > 2 - and arr_line[0][:3].upper() == "END" - ): - break - # look for keyword and store line as data o - # r comment - key, result = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - self._save_comments(arr_line, line, key, comments) - if ( - result[1] is not None - and result[1][:3].upper() == "END" - ): - break - else: - # block empty, store empty array in block variables - empty_arr = [] - for ds in self.datasets.values(): - if isinstance(ds, mfdata.MFTransient): - transient_key = block_header.get_transient_key() - ds.set_data(empty_arr, key=transient_key) - self.loaded = True - self.is_valid() - - def _find_data_by_keyword(self, line, fd, initial_comment): - first_key = None - nothing_found = False - next_line = [True, line] - while next_line[0] and not nothing_found: - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - key = datautil.find_keyword(arr_line, self.datasets_keyword) - if key is not None: - ds_name = self.datasets_keyword[key].name - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading data {ds_name}...") - next_line = self.datasets[ds_name].load( - next_line[1], - fd, - self.block_headers[-1], - initial_comment, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format(ds_name, self.structure.name, fd.name), - ) - - # see if first item's name indicates a reference to - # another package - package_info_list = self._get_package_info( - self.datasets[ds_name] - 
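# _find_data_by_keyword above matches the first token of each input line
# against a dictionary of known dataset keywords. A toy version of that
# dispatch loop; the keyword table and input lines are made up:
def dispatch_lines(lines, keyword_to_dataset):
    found = {}
    for line in lines:
        tokens = line.split()
        if not tokens:
            continue
        key = tokens[0].lower()
        if key in keyword_to_dataset:
            found.setdefault(keyword_to_dataset[key], []).append(line)
    return found

keywords = {"print_input": "print_input", "save_flows": "save_flows"}
print(dispatch_lines(["PRINT_INPUT", "SAVE_FLOWS", "# a comment"], keywords))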
) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[1]}..." - ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - if first_key is None: - first_key = key - nothing_found = False - elif ( - arr_line[0].lower() == "readasarrays" - and self.path[-1].lower() == "options" - and self._container_package.structure.read_as_arrays is False - ): - error_msg = ( - "ERROR: Attempting to read a ReadAsArrays " - "package as a non-ReadAsArrays " - "package {}".format(self.path) - ) - raise ReadAsArraysException(error_msg) - else: - nothing_found = True - - if first_key is None: - # look for recarrays. if there is a lone recarray in this block, - # use it by default - recarrays = self.structure.get_all_recarrays() - if len(recarrays) != 1: - return key, [None, None] - dataset = self.datasets[recarrays[0].name] - ds_result = dataset.load( - line, fd, self.block_headers[-1], initial_comment - ) - - # see if first item's name indicates a reference to another - # package - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - None, - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - - return recarrays[0].keyword, ds_result - else: - return first_key, next_line - - def _get_package_info(self, dataset): - if not dataset.structure.file_data: - return None - for index in range(0, len(dataset.structure.data_item_structures)): - data_item = dataset.structure.data_item_structures[index] - if ( - data_item.type == DatumType.keyword - or data_item.type == DatumType.string - ): - item_name = data_item.name - package_type = item_name[:-1] - model_type = self._model_or_sim.structure.model_type - # not all packages have the same naming convention - # try different naming conventions to find the appropriate - # package - package_types = [ - package_type, - f"{self._container_package.package_type}" - f"{package_type}", - ] - package_type_found = None - for ptype in package_types: - if ( - PackageContainer.package_factory(ptype, model_type) - is not None - ): - package_type_found = ptype - break - if package_type_found is not None: - try: - data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'getting data from "{}" ' - 'in block "{}".'.format( - dataset.structure.name, self.structure.name - ), - ) - package_info_list = [] - if isinstance(data, np.recarray): - for row in data: - self._add_to_info_list( - package_info_list, - row[index], - package_type_found, - ) - else: - self._add_to_info_list( - package_info_list, data, package_type_found - ) - - return package_info_list - return None - - def _add_to_info_list( - self, package_info_list, file_location, package_type_found - ): - file_path, file_name = os.path.split(file_location) - dict_package_name = f"{package_type_found}_{self.path[-2]}" - package_info_list.append( - ( - package_type_found, - file_name, - file_path, - dict_package_name, - ) - ) - - def _save_comments(self, arr_line, line, key, comments): - # FIX: Save these comments somewhere in the data set - if key not in self.datasets_keyword: - if MFComment.is_comment(key, True): - if comments: - comments.append("\n") - comments.append(arr_line) - - def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes block to a file object. - - Parameters - ---------- - fd : file object - File object to write to. 
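# _get_package_info above tries more than one naming convention when
# resolving a child package type (the bare type, then the parent package
# type plus the bare type). A sketch of that fallback lookup against a
# made-up registry; the real lookup goes through
# PackageContainer.package_factory:
REGISTRY = {"utlts", "utlobs"}  # assumed registry contents

def resolve_package_type(package_type, parent_type):
    for candidate in (package_type, f"{parent_type}{package_type}"):
        if candidate in REGISTRY:
            return candidate
    return None

assert resolve_package_type("ts", "utl") == "utlts"
assert resolve_package_type("bogus", "utl") is None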
- - """ - # never write an empty block - is_empty = self.is_empty() - if ( - is_empty - and self.structure.name.lower() != "exchanges" - and self.structure.name.lower() != "options" - and self.structure.name.lower() != "sources" - and self.structure.name.lower() != "stressperioddata" - ): - return - if self.structure.repeating(): - repeating_datasets = self._find_repeating_datasets() - for repeating_dataset in repeating_datasets: - # resolve any missing block headers - self._add_missing_block_headers(repeating_dataset) - for block_header in sorted(self.block_headers): - # write block - self._write_block(fd, block_header, ext_file_action) - else: - self._write_block(fd, self.block_headers[0], ext_file_action) - - def _add_missing_block_headers(self, repeating_dataset): - key_data_list = repeating_dataset.get_active_key_list() - # assemble a dictionary of data keys and empty keys - key_dict = {} - for key in key_data_list: - key_dict[key[0]] = True - for key, value in repeating_dataset.empty_keys.items(): - if value: - key_dict[key] = True - for key in key_dict.keys(): - has_data = repeating_dataset.has_data(key) - empty_key = ( - key in repeating_dataset.empty_keys - and repeating_dataset.empty_keys[key] - ) - if not self.header_exists(key) and (has_data or empty_key): - self._build_repeating_header([key]) - - def header_exists(self, key, data_path=None): - if not isinstance(key, list): - if key is None: - return - comp_key_list = [key] - else: - comp_key_list = key - for block_header in self.block_headers: - transient_key = block_header.get_transient_key(data_path) - if transient_key is True: - return - for comp_key in comp_key_list: - if transient_key is not None and transient_key == comp_key: - return True - return False - - def set_all_data_external( - self, - base_name, - check_data=True, - external_data_folder=None, - binary=False, - ): - """Sets the block's list and array data to be stored externally, - base_name is external file name's prefix, check_data determines - if data error checking is enabled during this process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - base_name : str - Base file name of external files where data will be written to. - check_data : bool - Whether to do data error checking. 
- external_data_folder - Folder where external data will be stored - binary: bool - Whether file will be stored as binary - - """ - - for key, dataset in self.datasets.items(): - lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( - dataset, mfdataplist.MFPandasList - ) - if ( - isinstance(dataset, mfdataarray.MFArray) - or (lst_data and dataset.structure.type == DatumType.recarray) - and dataset.enabled - ): - if not binary or ( - lst_data - and ( - dataset.data_dimensions.package_dim.boundnames() - or not dataset.structure.basic_item - ) - ): - ext = "txt" - binary = False - else: - ext = "bin" - file_path = f"{base_name}_{dataset.structure.name}.{ext}" - replace_existing_external = False - if external_data_folder is not None: - # get simulation root path - root_path = self._simulation_data.mfpath.get_sim_path() - # get model relative path, if it exists - if isinstance(self._model_or_sim, ModelInterface): - name = self._model_or_sim.name - rel_path = ( - self._simulation_data.mfpath.model_relative_path[ - name - ] - ) - if rel_path is not None: - root_path = os.path.join(root_path, rel_path) - full_path = os.path.join(root_path, external_data_folder) - if not os.path.exists(full_path): - # create new external data folder - os.makedirs(full_path) - file_path = os.path.join(external_data_folder, file_path) - replace_existing_external = True - dataset.store_as_external_file( - file_path, - replace_existing_external=replace_existing_external, - check_data=check_data, - binary=binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the block's list and array data to be stored internally, - check_data determines if data error checking is enabled during this - process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - check_data : bool - Whether to do data error checking. 
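# set_all_data_external above derives each external file's name from
# base_name, the dataset name, and a txt/bin extension, optionally placed
# under an external data folder. A runnable sketch of that naming rule (the
# example names are illustrative):
import os

def external_file_path(base_name, dataset_name, binary=False, folder=None):
    ext = "bin" if binary else "txt"
    fname = f"{base_name}_{dataset_name}.{ext}"
    return os.path.join(folder, fname) if folder else fname

assert external_file_path("mymodel", "packagedata") == "mymodel_packagedata.txt"
assert external_file_path(
    "mymodel", "perioddata", binary=True, folder="external"
) == os.path.join("external", "mymodel_perioddata.bin")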
- - """ - - for key, dataset in self.datasets.items(): - if ( - isinstance(dataset, mfdataarray.MFArray) - or ( - ( - isinstance(dataset, mfdatalist.MFList) - or isinstance(dataset, mfdataplist.MFPandasList) - ) - and dataset.structure.type == DatumType.recarray - ) - and dataset.enabled - ): - dataset.store_internal(check_data=check_data) - - def _find_repeating_datasets(self): - repeating_datasets = [] - for key, dataset in self.datasets.items(): - if dataset.repeating: - repeating_datasets.append(dataset) - return repeating_datasets - - def _prepare_external(self, fd, file_name, binary=False): - fd_main = fd - fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) - # resolve full file and folder path - fd_file_path = os.path.join(fd_path, file_name) - fd_folder_path = os.path.split(fd_file_path)[0] - if fd_folder_path != "": - if not os.path.exists(fd_folder_path): - # create new external data folder - os.makedirs(fd_folder_path) - return fd_main, fd_file_path - - def _write_block(self, fd, block_header, ext_file_action): - transient_key = None - basic_list = False - dataset_one = list(self.datasets.values())[0] - if isinstance( - dataset_one, - (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), - ): - basic_list = True - for dataset in self.datasets.values(): - assert isinstance( - dataset, - ( - mfdataplist.MFPandasList, - mfdataplist.MFPandasTransientList, - ), - ) - # write block header - block_header.write_header(fd) - if len(block_header.data_items) > 0: - transient_key = block_header.get_transient_key() - - # gather data sets to write - data_set_output = [] - data_found = False - for key, dataset in self.datasets.items(): - try: - if transient_key is None: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" writing data {dataset.structure.name}..." - ) - if basic_list: - ext_fname = dataset.external_file_name() - if ext_fname is not None: - binary = dataset.binary_ext_data() - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry(fd, fd_main=fd_main) - fd = fd_main - else: - dataset.write_file_entry(fd) - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - else: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - " writing data {} ({}).." 
".".format( - dataset.structure.name, transient_key - ) - ) - if basic_list: - ext_fname = dataset.external_file_name(transient_key) - if ext_fname is not None: - binary = dataset.binary_ext_data(transient_key) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - fd_main=fd_main, - ) - fd = fd_main - else: - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - ) - else: - if dataset.repeating: - output = dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action - ) - if output is not None: - data_set_output.append(output) - data_found = True - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message=( - "Error occurred while writing data " - f'"{dataset.structure.name}" in block ' - f'"{self.structure.name}" to file "{fd.name}"' - ), - ) - if not data_found: - return - if not basic_list: - # write block header - block_header.write_header(fd) - - if self.external_file_name is not None: - indent_string = self._simulation_data.indent_string - fd.write( - f"{indent_string}open/close " - f'"{self.external_file_name}"\n' - ) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, self.external_file_name - ) - # write data sets - for output in data_set_output: - fd.write(output) - - # write trailing comments - pth = block_header.blk_trailing_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - if self.external_file_name is not None and not basic_list: - # switch back writing to package file - fd.close() - fd = fd_main - - # write block footer - block_header.write_footer(fd) - - # write post block comments - pth = block_header.blk_post_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - # write extra line if comments are off - if not self._simulation_data.comments_on: - fd.write("\n") - - def is_allowed(self): - """Determine if block is valid based on the values of dependent - MODFLOW variables.""" - if self.structure.variable_dependant_path: - # fill in empty part of the path with the current path - if len(self.structure.variable_dependant_path) == 3: - dependant_var_path = ( - self.path[0], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 2: - dependant_var_path = ( - self.path[0], - self.path[1], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 1: - dependant_var_path = ( - self.path[0], - self.path[1], - self.path[2], - ) + self.structure.variable_dependant_path - else: - dependant_var_path = None - - # get dependency - dependant_var = None - mf_data = self._simulation_data.mfdata - if dependant_var_path in mf_data: - dependant_var = mf_data[dependant_var_path] - - # resolve dependency - if self.structure.variable_value_when_active[0] == "Exists": - exists = self.structure.variable_value_when_active[1] - if dependant_var and exists.lower() == "true": - return True - elif not dependant_var and exists.lower() == "false": - return True - else: - return False - elif not dependant_var: - return False - elif 
self.structure.variable_value_when_active[0] == ">": - min_val = self.structure.variable_value_when_active[1] - if dependant_var > float(min_val): - return True - else: - return False - elif self.structure.variable_value_when_active[0] == "<": - max_val = self.structure.variable_value_when_active[1] - if dependant_var < float(max_val): - return True - else: - return False - return True - - def is_valid(self): - """ - Returns true if the block is valid. - """ - # check data sets - for dataset in self.datasets.values(): - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid: - return False - # check variables - for block_header in self.block_headers: - for dataset in block_header.data_items: - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid(): - return False - - -class MFPackage(PackageInterface): - """ - Provides an interface for the user to specify data to build a package. - - Parameters - ---------- - parent : MFModel, MFSimulation, or MFPackage - The parent model, simulation, or package containing this package - package_type : str - String defining the package type - filename : str or PathLike - Name or path of file where this package is stored - quoted_filename : str - Filename with quotes around it when there is a space in the name - pname : str - Package name - loading_package : bool - Whether or not to add this package to the parent container's package - list during initialization - - Attributes - ---------- - blocks : dict - Dictionary of blocks contained in this package by block name - path : tuple - Data dictionary path to this package - structure : PackageStructure - Describes the blocks and data contain in this package - dimensions : PackageDimension - Resolves data dimensions for data within this package - - """ - - def __init__( - self, - parent, - package_type, - filename=None, - pname=None, - loading_package=False, - **kwargs, - ): - parent_file = kwargs.pop("parent_file", None) - if isinstance(parent, MFPackage): - self.model_or_sim = parent.model_or_sim - self.parent_file = parent - elif parent_file is not None: - self.model_or_sim = parent - self.parent_file = parent_file - else: - self.model_or_sim = parent - self.parent_file = None - _internal_package = kwargs.pop("_internal_package", False) - if _internal_package: - self.internal_package = True - else: - self.internal_package = False - self._data_list = [] - self._package_type = package_type - if self.model_or_sim.type == "Model" and package_type.lower() != "nam": - self.model_name = self.model_or_sim.name - else: - self.model_name = None - - # a package must have a dfn_file_name - if not hasattr(self, "dfn_file_name"): - self.dfn_file_name = "" - - if ( - self.model_or_sim.type != "Model" - and self.model_or_sim.type != "Simulation" - ): - message = ( - "Invalid model_or_sim parameter. Expecting either a " - 'model or a simulation. 
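# is_allowed above activates a block based on a dependent MODFLOW variable
# and a rule such as ("Exists", "true"), (">", minval), or ("<", maxval). A
# compact sketch of evaluating those three rule forms (an approximation of
# the branching above, not flopy code):
def rule_allows(rule, value):
    op, operand = rule
    if op == "Exists":
        return bool(value) == (operand.lower() == "true")
    if not value:
        return False
    if op == ">":
        return value > float(operand)
    if op == "<":
        return value < float(operand)
    return True

assert rule_allows(("Exists", "true"), 5)
assert not rule_allows((">", "10"), 3)
assert rule_allows(("<", "10"), 3)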
Instead type "{}" was ' - "given.".format(type(self.model_or_sim)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self._package_container = PackageContainer( - self.model_or_sim.simulation_data - ) - self.simulation_data = self.model_or_sim.simulation_data - - self.blocks = {} - self.container_type = [] - self.loading_package = loading_package - if pname is not None: - if not isinstance(pname, str): - message = ( - "Invalid pname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(pname)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self.package_name = pname.lower() - else: - self.package_name = None - - if filename is None: - if self.model_or_sim.type == "Simulation": - # filename uses simulation base name - base_name = os.path.basename( - os.path.normpath(self.model_or_sim.name) - ) - self._filename = f"{base_name}.{package_type}" - else: - # filename uses model base name - self._filename = f"{self.model_or_sim.name}.{package_type}" - else: - if not isinstance(filename, (str, os.PathLike)): - message = ( - "Invalid fname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(filename)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - self._filename = datautil.clean_filename( - str(filename).replace("\\", "/") - ) - self.path, self.structure = self.model_or_sim.register_package( - self, not loading_package, pname is None, filename is None - ) - self.dimensions = self.create_package_dimensions() - - if self.path is None: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package type {} failed to register property." - " {}".format(self._package_type, self.path) - ) - if self.parent_file is not None: - self.container_type.append(PackageContainerType.package) - # init variables that may be used later - self.post_block_comments = None - self.last_error = None - self.bc_color = "black" - self.__inattr = False - self._child_package_groups = {} - child_builder_call = kwargs.pop("child_builder_call", None) - if ( - self.parent_file is not None - and child_builder_call is None - and package_type in self.parent_file._child_package_groups - ): - # initialize as part of the parent's child package group - chld_pkg_grp = self.parent_file._child_package_groups[package_type] - chld_pkg_grp.init_package(self, self._filename, False) - - # remove any remaining valid kwargs - key_list = list(kwargs.keys()) - for key in key_list: - if "filerecord" in key and hasattr(self, f"{key}"): - kwargs.pop(f"{key}") - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = ( - f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
- ) - raise FlopyException(excpt_str) - - def __init_subclass__(cls): - """Register package type""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self._get_pname(), - ) - return - - if all( - hasattr(self, attr) for attr in ["model_or_sim", "_package_type"] - ): - if hasattr(self.model_or_sim, "_mg_resync"): - if not self.model_or_sim._mg_resync: - self.model_or_sim._mg_resync = self._mg_resync - - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - @property - def filename(self): - """Package's file name.""" - return self._filename - - @property - def quoted_filename(self): - """Package's file name with quotes if there is a space.""" - if " " in self._filename: - return f'"{self._filename}"' - return self._filename - - @filename.setter - def filename(self, fname): - """Package's file name.""" - if ( - isinstance(self.parent_file, MFPackage) - and self.package_type in self.parent_file._child_package_groups - ): - fname = datautil.clean_filename(fname) - try: - child_pkg_group = self.parent_file._child_package_groups[ - self.structure.file_type - ] - child_pkg_group._update_filename(self._filename, fname) - except Exception: - print( - "WARNING: Unable to update file name for parent" - f"package of {self.package_name}." - ) - if self.model_or_sim is not None and fname is not None: - if self._package_type != "nam": - self.model_or_sim.update_package_filename(self, fname) - self._filename = fname - - @property - def package_type(self): - """String describing type of package""" - return self._package_type - - @property - def name(self): - """Name of package""" - return [self.package_name] - - @name.setter - def name(self, name): - """Name of package""" - self.package_name = name - - @property - def parent(self): - """Parent package""" - return self.model_or_sim - - @parent.setter - def parent(self, parent): - """Parent package""" - assert False, "Do not use this setter to set the parent" - - @property - def plottable(self): - """If package is plottable""" - if self.model_or_sim.type == "Simulation": - return False - else: - return True - - @property - def output(self): - """ - Method to get output associated with a specific package - - Returns - ------- - MF6Output object - """ - return MF6Output(self) - - @property - def data_list(self): - """List of data in this package.""" - # return [data_object, data_object, ...] - return self._data_list - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. 
- """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. - type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def add_package(self, package): - pkg_type = package.package_type.lower() - if pkg_type in self._package_container.package_type_dict: - for existing_pkg in self._package_container.package_type_dict[ - pkg_type - ]: - if existing_pkg is package: - # do not add the same package twice - return - self._package_container.add_package(package) - - def _get_aux_data(self, aux_names): - if hasattr(self, "stress_period_data"): - spd = self.stress_period_data.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "packagedata"): - pd = self.packagedata.get_data() - if aux_names[0][1] in pd.dtype.names: - return pd - if hasattr(self, "perioddata"): - spd = self.perioddata.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "aux"): - return self.aux.get_data() - return None - - def _boundnames_active(self): - if hasattr(self, "boundnames"): - if self.boundnames.get_data(): - return True - return False - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Data check, returns True on success. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. 
- """ - - if checktype is None: - checktype = mf6check - # do general checks - chk = super().check(f, verbose, level, checktype) - - # do mf6 specific checks - if hasattr(self, "auxiliary"): - # auxiliary variable check - # check if auxiliary variables are defined - aux_names = self.auxiliary.get_data() - if aux_names is not None and len(aux_names[0]) > 1: - num_aux_names = len(aux_names[0]) - 1 - # check for stress period data - aux_data = self._get_aux_data(aux_names) - if aux_data is not None and len(aux_data) > 0: - # make sure the check object exists - if chk is None: - chk = self._get_check(f, verbose, level, checktype) - if isinstance(aux_data, dict): - aux_datasets = list(aux_data.values()) - else: - aux_datasets = [aux_data] - dataset_type = "unknown" - for dataset in aux_datasets: - if isinstance(dataset, np.recarray): - dataset_type = "recarray" - break - elif isinstance(dataset, np.ndarray): - dataset_type = "ndarray" - break - # if aux data is in a list - if dataset_type == "recarray": - # check for time series data - time_series_name_dict = {} - if hasattr(self, "ts") and hasattr( - self.ts, "time_series_namerecord" - ): - # build dictionary of time series data variables - ts_nr = self.ts.time_series_namerecord.get_data() - if ts_nr is not None: - for item in ts_nr: - if len(item) > 0 and item[0] is not None: - time_series_name_dict[item[0]] = True - # auxiliary variables are last unless boundnames - # defined, then second to last - if self._boundnames_active(): - offset = 1 - else: - offset = 0 - - # loop through stress period datasets with aux data - for data in aux_datasets: - if isinstance(data, np.recarray): - for row in data: - row_size = len(row) - aux_start_loc = ( - row_size - num_aux_names - offset - 1 - ) - # loop through auxiliary variables - for idx, var in enumerate( - list(aux_names[0])[1:] - ): - # get index of current aux variable - data_index = aux_start_loc + idx - # verify auxiliary value is either - # numeric or time series variable - if ( - not datautil.DatumUtil.is_float( - row[data_index] - ) - and row[data_index] - not in time_series_name_dict - ): - desc = ( - f"Invalid non-numeric " - f"value " - f"'{row[data_index]}' " - f"in auxiliary data." - ) - chk._add_to_summary( - "Error", - desc=desc, - package=self.package_name, - ) - # else if stress period data is arrays - elif dataset_type == "ndarray": - # loop through auxiliary stress period datasets - for data in aux_datasets: - # verify auxiliary value is either numeric or time - # array series variable - if isinstance(data, np.ndarray): - val = np.isnan(np.sum(data)) - if val: - desc = ( - "One or more nan values were " - "found in auxiliary data." 
- ) - chk._add_to_summary( - "Warning", - desc=desc, - package=self.package_name, - ) - return chk - - def _get_nan_exclusion_list(self): - excl_list = [] - if hasattr(self, "stress_period_data"): - spd_struct = self.stress_period_data.structure - for item_struct in spd_struct.data_item_structures: - if item_struct.optional or item_struct.keystring_dict: - excl_list.append(item_struct.name) - return excl_list - - def _get_data_str(self, formal, show_data=True): - data_str = ( - "package_name = {}\nfilename = {}\npackage_type = {}" - "\nmodel_or_simulation_package = {}" - "\n{}_name = {}" - "\n".format( - self._get_pname(), - self._filename, - self.package_type, - self.model_or_sim.type.lower(), - self.model_or_sim.type.lower(), - self.model_or_sim.name, - ) - ) - if self.parent_file is not None and formal: - data_str = ( - f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" - ) - else: - data_str = f"{data_str}\n" - if show_data: - for block in self.blocks.values(): - if formal: - bl_repr = repr(block) - if len(bl_repr.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, repr(block) - ) - ) - else: - bl_str = str(block) - if len(bl_str.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, str(block) - ) - ) - return data_str - - def _get_pname(self): - if self.package_name is not None: - return str(self.package_name) - else: - return str(self._filename) - - def _get_block_header_info(self, line, path): - # init - header_variable_strs = [] - arr_clean_line = line.strip().split() - header_comment = MFComment( - "", path + (arr_clean_line[1],), self.simulation_data, 0 - ) - # break header into components - if len(arr_clean_line) < 2: - message = ( - "Block header does not contain a name. 
Name " - 'expected in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "parsing block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - elif len(arr_clean_line) == 2: - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - else: - # process text after block name - comment = False - for entry in arr_clean_line[2:]: - # if start of comment - if MFComment.is_comment(entry.strip()[0]): - comment = True - if comment: - header_comment.text = " ".join( - [header_comment.text, entry] - ) - else: - header_variable_strs.append(entry) - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - - def _update_size_defs(self): - # build temporary data lookup by name - data_lookup = {} - for block in self.blocks.values(): - for dataset in block.datasets.values(): - data_lookup[dataset.structure.name] = dataset - - # loop through all data - for block in self.blocks.values(): - for dataset in block.datasets.values(): - # if data shape is 1-D - if ( - dataset.structure.shape - and len(dataset.structure.shape) == 1 - ): - # if shape name is data in this package - if dataset.structure.shape[0] in data_lookup: - size_def = data_lookup[dataset.structure.shape[0]] - size_def_name = size_def.structure.name - - if isinstance(dataset, mfdata.MFTransient): - # for transient data always use the maximum size - new_size = -1 - for key in dataset.get_active_key_list(): - try: - data = dataset.get_data(key=key[0]) - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - data_len = len(data) - if data_len > new_size: - new_size = data_len - else: - # for all other data set max to size - new_size = -1 - try: - data = dataset.get_data() - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - new_size = len(dataset.get_data()) - - if size_def.get_data() is None: - current_size = -1 - else: - current_size = size_def.get_data() - - if new_size > current_size: - # store current size - size_def.set_data(new_size) - - # informational message to the user - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "INFORMATION: {} in {} changed to {} " - "based on size of {}".format( - size_def_name, - size_def.structure.path[:-1], - new_size, - dataset.structure.name, - ) - ) - - def inspect_cells(self, cell_list, stress_period=None): - """ - Inspect model cells. Returns package data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data, only return data from this stress period. If - not specified or None, all stress period data will be returned. 
- - Returns - ------- - output : array - Array containing inspection results - - """ - data_found = [] - - # loop through blocks - local_index_names = [] - local_index_blocks = [] - local_index_values = [] - local_index_cellids = [] - # loop through blocks in package - for block in self.blocks.values(): - # loop through data in block - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - # handle list data - cellid_column = None - local_index_name = None - # loop through list data column definitions - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if index == 0 and data_item.type == DatumType.integer: - local_index_name = data_item.name - # look for cellid column in list data row - if isinstance(data_item, MFDataItemStructure) and ( - data_item.is_cellid or data_item.possible_cellid - ): - cellid_column = index - break - if cellid_column is not None: - data_output = DataSearchOutput(dataset.path) - local_index_vals = [] - local_index_cells = [] - # get data - if isinstance(dataset, mfdatalist.MFTransientList): - # data may be in multiple transient blocks, get - # data from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get data - main_data = {-1: dataset.get_data()} - - # loop through each dataset - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = value.dtype.names - # loop through list data rows - for line in value: - # loop through list of cells we are searching - # for - for cell in cell_list: - if isinstance( - line[cellid_column], tuple - ) and cellids_equal( - line[cellid_column], cell - ): - # save data found - data_output.data_entries.append(line) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append( - key - ) - if datautil.DatumUtil.is_int(line[0]): - # save index data for further - # processing. 
assuming index is - # always first entry - local_index_vals.append(line[0]) - local_index_cells.append(cell) - - if ( - local_index_name is not None - and len(local_index_vals) > 0 - ): - # capture index lookups for scanning related data - local_index_names.append(local_index_name) - local_index_blocks.append(block.path[-1]) - local_index_values.append(local_index_vals) - local_index_cellids.append(local_index_cells) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - elif isinstance(dataset, mfdataarray.MFArray): - # handle array data - data_shape = copy.deepcopy( - dataset.structure.data_item_structures[0].shape - ) - if dataset.path[-1] == "top": - # top is a special case where the two datasets - # need to be combined to get the correct layer top - model_grid = self.model_or_sim.modelgrid - main_data = {-1: model_grid.top_botm} - data_shape.append("nlay") - else: - if isinstance(dataset, mfdataarray.MFTransientArray): - # data may be in multiple blocks, get data from - # appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get a process data - main_data = {-1: dataset.get_data()} - if main_data is None: - continue - data_output = DataSearchOutput(dataset.path) - # loop through datasets - for key, array_data in main_data.items(): - if array_data is None: - continue - self.model_or_sim.match_array_cells( - cell_list, data_shape, array_data, key, data_output - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - - if len(local_index_names) > 0: - # look for data that shares the index value with data found - # for example a shared well or reach number - for block in self.blocks.values(): - # loop through data - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - data_item = dataset.structure.data_item_structures[0] - data_output = DataSearchOutput(dataset.path) - # loop through previous data found - for ( - local_index_name, - local_index_vals, - cell_ids, - local_block_name, - ) in zip( - local_index_names, - local_index_values, - local_index_cellids, - local_index_blocks, - ): - if local_block_name == block.path[-1]: - continue - if ( - isinstance(data_item, MFDataItemStructure) - and data_item.name == local_index_name - and data_item.type == DatumType.integer - ): - # matching data index type found, get data - if isinstance( - dataset, mfdatalist.MFTransientList - ): - # data may be in multiple blocks, get data - # from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block - main_data = {-1: dataset.get_data()} - # loop through the data - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = ( - value.dtype.names - ) - # loop through each row of data - for line in value: - # loop through the index values we are - # looking for - for index_val, cell_id in zip( - local_index_vals, cell_ids - ): - # try to match index values we are - # looking for to the data - if index_val == line[0]: - # save data found - data_output.data_entries.append( - line - ) - data_output.data_entry_ids.append( - index_val - ) - data_output.data_entry_cellids.append( - cell_id - ) - data_output.data_entry_stress_period.append( - key - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - return 
data_found - - def remove(self): - """Removes this package from the simulation/model it is currently a - part of. - """ - self.model_or_sim.remove_package(self) - - def build_child_packages_container(self, pkg_type, filerecord): - """Builds a container object for any child packages. This method is - only intended for FloPy internal use.""" - # get package class - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - # create child package object - child_pkgs_name = f"utl{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") - if child_pkgs_obj is None and self.model_or_sim.model_type is None: - # simulation level object, try just the package type in the name - child_pkgs_name = f"{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory( - child_pkgs_name, "" - ) - if child_pkgs_obj is None: - # see if the package is part of one of the supported model types - for model_type in MFStructure().sim_struct.model_types: - child_pkgs_name = f"{model_type}{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory( - child_pkgs_name, "" - ) - if child_pkgs_obj is not None: - break - child_pkgs = child_pkgs_obj( - self.model_or_sim, self, pkg_type, filerecord, None, package_obj - ) - setattr(self, pkg_type, child_pkgs) - self._child_package_groups[pkg_type] = child_pkgs - - def _get_dfn_name_dict(self): - dfn_name_dict = {} - item_num = 0 - for item in self.structure.dfn_list: - if len(item) > 1: - item_name = item[1].split() - if len(item_name) > 1 and item_name[0] == "name": - dfn_name_dict[item_name[1]] = item_num - item_num += 1 - return dfn_name_dict - - def build_child_package(self, pkg_type, data, parameter_name, filerecord): - """Builds a child package. 
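-
-        A child package is a separate input file (for example TS or
-        OBS) attached to its parent package through a file record, and
-        is normally created indirectly from the parent rather than by
-        calling this method; a minimal sketch (hypothetical names,
-        assuming ghb is an existing GHB package with observations):
-
-        >>> ghb.obs.initialize(
-        ...     filename="model.ghb.obs",
-        ...     continuous={"model.ghb.obs.csv": [("h1", "ghb", (0, 0, 0))]},
-        ... )
-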
This method is only intended for FloPy - internal use.""" - if not hasattr(self, pkg_type): - self.build_child_packages_container(pkg_type, filerecord) - if data is not None: - package_group = getattr(self, pkg_type) - # build child package file name - child_path = package_group.next_default_file_path() - # create new empty child package - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - package = package_obj( - self, filename=child_path, child_builder_call=True - ) - assert hasattr(package, parameter_name) - - if isinstance(data, dict): - # order data correctly - dfn_name_dict = package._get_dfn_name_dict() - ordered_data_items = [] - for key, value in data.items(): - if key in dfn_name_dict: - ordered_data_items.append( - [dfn_name_dict[key], key, value] - ) - else: - ordered_data_items.append([999999, key, value]) - ordered_data_items = sorted( - ordered_data_items, key=lambda x: x[0] - ) - - # evaluate and add data to package - unused_data = {} - for order, key, value in ordered_data_items: - # if key is an attribute of the child package - if isinstance(key, str) and hasattr(package, key): - # set child package attribute - child_data_attr = getattr(package, key) - if isinstance(child_data_attr, mfdatalist.MFList): - child_data_attr.set_data(value, autofill=True) - elif isinstance(child_data_attr, mfdata.MFData): - child_data_attr.set_data(value) - elif key == "fname" or key == "filename": - child_path = value - package._filename = value - else: - setattr(package, key, value) - else: - unused_data[key] = value - if unused_data: - setattr(package, parameter_name, unused_data) - else: - setattr(package, parameter_name, data) - - # append package to list - package_group.init_package(package, child_path) - return package - - def build_mfdata(self, var_name, data=None): - """Returns the appropriate data type object (mfdatalist, mfdataarray, - or mfdatascalar) given that object the appropriate structure (looked - up based on var_name) and any data supplied. This method is for - internal FloPy library use only. - - Parameters - ---------- - var_name : str - Variable name - - data : many supported types - Data contained in this object - - Returns - ------- - data object : MFData subclass - - """ - if self.loading_package: - data = None - for key, block in self.structure.blocks.items(): - if var_name in block.data_structures: - if block.name not in self.blocks: - self.blocks[block.name] = MFBlock( - self.simulation_data, - self.dimensions, - block, - self.path + (key,), - self.model_or_sim, - self, - ) - dataset_struct = block.data_structures[var_name] - var_path = self.path + (key, var_name) - ds = self.blocks[block.name].add_dataset( - dataset_struct, data, var_path - ) - self._data_list.append(ds) - return ds - - message = 'Unable to find variable "{}" in package ' '"{}".'.format( - var_name, self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "building data objects", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - def set_model_relative_path(self, model_ws): - """Sets the model path relative to the simulation's path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. 
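-
-        Examples
-        --------
-        A minimal sketch (hypothetical path); this is normally invoked
-        for each package when the simulation or model path changes,
-        rather than called directly:
-
-        >>> package.set_model_relative_path("gwf_model_1")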
- - """ - # update blocks - for key, block in self.blocks.items(): - block.set_model_relative_path(model_ws) - # update sub-packages - for package in self._package_container.packagelist: - package.set_model_relative_path(model_ws) - - def set_all_data_external( - self, - check_data=True, - external_data_folder=None, - base_name=None, - binary=False, - ): - """Sets the package's list and array data to be stored externally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - external_data_folder - Folder where external data will be stored - base_name: str - Base file name prefix for all files - binary: bool - Whether file will be stored as binary - """ - # set blocks - for key, block in self.blocks.items(): - file_name = os.path.split(self.filename)[1] - if base_name is not None: - file_name = f"{base_name}_{file_name}" - block.set_all_data_external( - file_name, - check_data, - external_data_folder, - binary, - ) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_external( - check_data, - external_data_folder, - base_name, - binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the package's list and array data to be stored internally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - - """ - # set blocks - for key, block in self.blocks.items(): - block.set_all_data_internal(check_data) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_internal(check_data) - - def load(self, strict=True): - """Loads the package from file. - - Parameters - ---------- - strict : bool - Enforce strict checking of data. - - Returns - ------- - success : bool - - """ - # open file - try: - fd_input_file = open( - datautil.clean_filename(self.get_file_path()), "r" - ) - except OSError as e: - if e.errno == errno.ENOENT: - message = "File {} of type {} could not be opened.".format( - self.get_file_path(), self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self.package_name, - self.path, - "loading package file", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - try: - self._load_blocks(fd_input_file, strict) - except ReadAsArraysException as err: - fd_input_file.close() - raise ReadAsArraysException(err) - # close file - fd_input_file.close() - - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # return validity of file - return self.is_valid() - - def is_valid(self): - """Returns whether or not this package is valid. 
- - Returns - ------- - is valid : bool - - """ - # Check blocks - for block in self.blocks.values(): - # Non-optional blocks must be enabled - if ( - block.structure.number_non_optional_data() > 0 - and not block.enabled - and block.is_allowed() - ): - self.last_error = ( - f'Required block "{block.block_header.name}" not enabled' - ) - return False - # Enabled blocks must be valid - if block.enabled and not block.is_valid: - self.last_error = f'Invalid block "{block.block_header.name}"' - return False - - return True - - def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): - # init - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( - MFComment("", self.path, self.simulation_data) - ) - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - blocks_read = 0 - found_first_block = False - line = " " - while line != "": - line = fd_input_file.readline() - clean_line = line.strip() - # If comment or empty line - if MFComment.is_comment(clean_line, True): - self._store_comment(line, found_first_block) - elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": - # parse block header - try: - block_header_info = self._get_block_header_info( - line, self.path - ) - except MFDataException as mfde: - message = ( - "An error occurred while loading block header " - 'in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "loading block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - mfde, - ) - - # if there is more than one possible block with the same name, - # resolve the correct block to use - block_key = block_header_info.name.lower() - block_num = 1 - possible_key = f"{block_header_info.name.lower()}-{block_num}" - if possible_key in self.blocks: - block_key = possible_key - block_header_name = block_header_info.name.lower() - while ( - block_key in self.blocks - and not self.blocks[block_key].is_allowed() - ): - block_key = f"{block_header_name}-{block_num}" - block_num += 1 - - if block_key not in self.blocks: - # block name not recognized, load block as comments and - # issue a warning - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" is not a valid block ' - "name for file type " - "{}.".format(block_key, self.package_type) - ) - print(warning_str) - self._store_comment(line, found_first_block) - while line != "": - line = fd_input_file.readline() - self._store_comment(line, found_first_block) - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line and ( - len(arr_line[0]) <= 2 - or arr_line[0][:3].upper() == "END" - ): - break - else: - found_first_block = True - skip_block = False - cur_block = self.blocks[block_key] - if cur_block.loaded: - # Only blocks defined as repeating are allowed to have - # multiple entries - header_name = block_header_info.name - if not self.structure.blocks[ - header_name.lower() - ].repeating(): - # warn and skip block - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" has ' - "multiple entries and is not " - "intended to be a repeating " - "block ({} package" - ")".format(header_name, self.package_type) - ) - print(warning_str) - skip_block = True - bhs = cur_block.structure.block_header_structure - bhval = block_header_info.variable_strings - if 
( - len(bhs) > 0 - and len(bhval) > 0 - and bhs[0].name == "iper" - ): - nper = self.simulation_data.mfdata[ - ("tdis", "dimensions", "nper") - ].get_data() - bhval_int = datautil.DatumUtil.is_int(bhval[0]) - if not bhval_int or int(bhval[0]) > nper: - # skip block when block stress period is greater - # than nper - skip_block = True - - if not skip_block: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading block {cur_block.structure.name}..." - ) - # reset comments - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - cur_block.load( - block_header_info, fd_input_file, strict - ) - - # write post block comment - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - blocks_read += 1 - if blocks_read >= max_blocks: - break - else: - # treat skipped block as if it is all comments - arr_line = datautil.PyListUtil.split_data_line( - clean_line - ) - self.post_block_comments.add_text(str(line), True) - while arr_line and ( - len(line) <= 2 or arr_line[0][:3].upper() != "END" - ): - line = fd_input_file.readline() - arr_line = datautil.PyListUtil.split_data_line( - line.strip() - ) - if arr_line: - self.post_block_comments.add_text( - str(line), True - ) - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - else: - if not ( - len(clean_line) == 0 - or (len(line) > 2 and line[:3].upper() == "END") - ): - # Record file location of beginning of unresolved text - # treat unresolved text as a comment for now - self._store_comment(line, found_first_block) - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes the package to a file. - - Parameters - ---------- - ext_file_action : ExtFileAction - How to handle pathing of external data files. - """ - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # create any folders in path - package_file_path = self.get_file_path() - package_folder = os.path.split(package_file_path)[0] - if package_folder and not os.path.isdir(package_folder): - os.makedirs(os.path.split(package_file_path)[0]) - - # open file - fd = open(package_file_path, "w") - - # write flopy header - if self.simulation_data.write_headers: - dt = datetime.datetime.now() - header = ( - "# File generated by Flopy version {} on {} at {}." - "\n".format( - __version__, - dt.strftime("%m/%d/%Y"), - dt.strftime("%H:%M:%S"), - ) - ) - fd.write(header) - - # write blocks - self._write_blocks(fd, ext_file_action) - - fd.close() - - def create_package_dimensions(self): - """Creates a package dimensions object. For internal FloPy library - use. - - Returns - ------- - package dimensions : PackageDimensions - - """ - model_dims = None - if self.container_type[0] == PackageContainerType.model: - model_dims = [ - modeldimensions.ModelDimensions( - self.path[0], self.simulation_data - ) - ] - else: - # this is a simulation file that does not correspond to a specific - # model. 
figure out which model to use and return a dimensions - # object for that model - if self.dfn_file_name[0:3] == "exg": - exchange_rec_array = self.simulation_data.mfdata[ - ("nam", "exchanges", "exchanges") - ].get_data() - if exchange_rec_array is None: - return None - for exchange in exchange_rec_array: - if exchange[1].lower() == self._filename.lower(): - model_dims = [ - modeldimensions.ModelDimensions( - exchange[2], self.simulation_data - ), - modeldimensions.ModelDimensions( - exchange[3], self.simulation_data - ), - ] - break - elif ( - self.dfn_file_name[4:7] == "gnc" - and self.model_or_sim.type == "Simulation" - ): - # get exchange file name associated with gnc package - if self.parent_file is not None: - exg_file_name = self.parent_file.filename - else: - raise Exception( - "Can not create a simulation-level " - "gnc file without a corresponding " - "exchange file. Exchange file must be " - "created first." - ) - # get models associated with exchange file from sim nam file - try: - exchange_recarray_data = ( - self.model_or_sim.name_file.exchanges.get_data() - ) - except MFDataException as mfde: - message = ( - "An error occurred while retrieving exchange " - "data from the simulation name file. The error " - "occurred while processing gnc file " - f'"{self.filename}".' - ) - raise MFDataException( - mfdata_except=mfde, - package=self._get_pname(), - message=message, - ) - assert exchange_recarray_data is not None - model_1 = None - model_2 = None - for exchange in exchange_recarray_data: - if exchange[1] == exg_file_name: - model_1 = exchange[2] - model_2 = exchange[3] - - # assign models to gnc package - model_dims = [ - modeldimensions.ModelDimensions( - model_1, self.simulation_data - ), - modeldimensions.ModelDimensions( - model_2, self.simulation_data - ), - ] - elif self.parent_file is not None: - model_dims = [] - for md in self.parent_file.dimensions.model_dim: - model_name = md.model_name - model_dims.append( - modeldimensions.ModelDimensions( - model_name, self.simulation_data - ) - ) - else: - model_dims = [ - modeldimensions.ModelDimensions(None, self.simulation_data) - ] - return modeldimensions.PackageDimensions( - model_dims, self.structure, self.path - ) - - def _store_comment(self, line, found_first_block): - # Store comment - if found_first_block: - self.post_block_comments.text += line - else: - self.simulation_data.mfdata[ - self.path + ("pkg_hdr_comments",) - ].text += line - - def _write_blocks(self, fd, ext_file_action): - # verify that all blocks are valid - if not self.is_valid(): - message = ( - 'Unable to write out model file "{}" due to the ' - "following error: " - "{} ({})".format(self._filename, self.last_error, self.path) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "writing package blocks", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - # write initial comments - pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) - if pkg_hdr_comments_path in self.simulation_data.mfdata: - self.simulation_data.mfdata[ - self.path + ("pkg_hdr_comments",) - ].write(fd, False) - - # loop through blocks - block_num = 1 - for block in self.blocks.values(): - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" writing block {block.structure.name}...") - # write block - block.write(fd, ext_file_action=ext_file_action) - block_num += 1 - - def get_file_path(self): - 
"""Returns the package file's path. - - Returns - ------- - file path : str - """ - if self.path[0] in self.simulation_data.mfpath.model_relative_path: - return os.path.join( - self.simulation_data.mfpath.get_model_path(self.path[0]), - self._filename, - ) - else: - return os.path.join( - self.simulation_data.mfpath.get_sim_path(), self._filename - ) - - def export(self, f, **kwargs): - """ - Method to export a package to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - Filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - User supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from .. import export - - return export.utils.package_export(f, self, **kwargs) - - def plot(self, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate - file names for output image files. Plots will be exported as - image files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only - used if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
- - """ - from ..plot.plotutil import PlotUtilities - - if not self.plottable: - raise TypeError("Simulation level packages are not plottable") - - axes = PlotUtilities._plot_package_helper(self, **kwargs) - return axes - - @staticmethod - def add_netcdf_entries(attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1): - if auxiliary: - auxnames = auxiliary - else: - auxnames = [] - - def add_entry(tagname, iaux=None, layer=None): - key = tagname - name = f"{pname}" - if iaux is not None: - key = f"{key}/{iaux}" - name = f"{name}_{auxiliary[iaux]}" - else: - name = f"{name}_{tagname}" - if layer is not None: - key = f"{key}/layer{layer}" - name = f"{name}_l{layer}" - - a = {} - a["varname"] = name.lower() - a["attrs"] = {} - a["attrs"]["modflow_input"] = ( - f"{mname}/{pname}/{tagname}" - ).upper() - if iaux is not None: - a["attrs"]["modflow_iaux"] = iaux + 1 - if layer is not None: - a["attrs"]["layer"] = layer - attrs[key] = a - - iaux = None - layer = None - # TODO - #if dataset.structure.layered and mesh == "LAYERED": - if data_item.layered and mesh == "LAYERED": - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate(auxnames): - for l in range(nlay): - add_entry(data_item.name, n, l + 1) - else: - for l in range(nlay): - add_entry(data_item.name, layer=l + 1) - else: - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate(auxnames): - add_entry(data_item.name, iaux=n) - else: - add_entry(data_item.name) - - # TODO filter out aux "auxiliary" - @staticmethod - def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1): - import flopy.mf6.modflow as modflow - from .data.mfstructure import DfnPackage, MFSimulationStructure - from .data.mfdataarray import MFArray - attrs = {} - p = modflow.mfgwfnpf.ModflowGwfnpf - sim_struct = MFSimulationStructure() - d = DfnPackage(p) - sim_struct.add_package(d, model_file=False) - npf = sim_struct.package_struct_objs['npf'] - #print(dir(npf)) - for key, block in npf.blocks.items(): - if key != "griddata" and key != "period": - continue - #print(key) - #print(dir(block)) - #print(block.data_structures) - for d in block.data_structures: - #print(d) - if (block.data_structures[d].layered): - #print("layered") - pass - if isinstance(block.data_structures[d], MFArray): - #print(d) - pass - - for package in MFPackage.__subclasses__(): - sim_struct.process_dfn(DfnPackage(package)) - #print(f"RENO added {DfnPackage(package).dfn_file_name}") - p = DfnPackage(package) - c, sc = p.dfn_file_name.split('.')[0].split('-') - #if p.dfn_file_name == 'gwf-npf.dfn': - #c, sc = p.dfn_file_name.split('.')[0].split('-') - #print(c) - #print(sc) - #print("FOUND NPF") - if c == mtype.lower() and sc == ptype.lower(): - sim_struct.add_package(p, model_file=False) - exit - pso = sim_struct.package_struct_objs[ptype.lower()] - for key, block in pso.blocks.items(): - if key != "griddata" and key != "period": - continue - for d in block.data_structures: - print(d) - if (block.data_structures[d].layered): - print("layered") - if isinstance(block.data_structures[d], MFArray): - print(d) - if (block.data_structures[d].netcdf): - print(f"adding {d}") - MFPackage.add_netcdf_entries(attrs, mtype, ptype, block.data_structures[d], auxiliary, mesh, nlay) - - return_attrs = {} - for k in list(attrs): - return_attrs[k] = attrs[k]['attrs'] - - - return return_attrs - - - def netcdf_info(self, mesh=None): - attrs = {} - - def add_entry(tagname, iaux=None, layer=None): - key = tagname - name = 
f"{self.package_name}" - if iaux: - auxvar = self.dimensions.get_aux_variables()[0] - key = f"{key}/{iaux}" - name = f"{name}_{auxvar[iaux]}" - else: - name = f"{name}_{tagname}" - if layer: - key = f"{key}/layer{layer}" - name = f"{name}_l{layer}" - - a = {} - a["varname"] = name.lower() - a["attrs"] = {} - a["attrs"]["modflow_input"] = ( - f"{self.model_name}/{self.package_name}/{tagname}" - ).upper() - if iaux: - a["attrs"]["modflow_iaux"] = iaux - if layer: - a["attrs"]["layer"] = layer - attrs[key] = a - - def add_entries(name): - iaux = None - layer = None - if dataset.structure.layered and mesh == "LAYERED": - if name == "aux" or name == "auxvar": - for n, auxname in enumerate( - self.dimensions.get_aux_variables()[0] - ): - if auxname == "auxiliary" and n == 0: - continue - for l in range(self.model_or_sim.modelgrid.nlay): - add_entry(name, n, l + 1) - else: - for l in range(self.model_or_sim.modelgrid.nlay): - add_entry(name, layer=l + 1) - else: - if name == "aux" or name == "auxvar": - for n, auxname in enumerate( - self.dimensions.get_aux_variables()[0] - ): - if auxname == "auxiliary" and n == 0: - continue - add_entry(name, iaux=n) - else: - add_entry(name) - - if self.dimensions.get_aux_variables(): - auxnames = list(self.dimensions.get_aux_variables()[0]) - if len(auxnames) and auxnames[0] == "auxiliary": - auxnames.pop(0) - else: - auxnames = [] - - for key, block in self.blocks.items(): - if key != "griddata" and key != "period": - continue - for dataset in block.datasets.values(): - if isinstance(dataset, mfdataarray.MFArray): - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if ( - dataset.structure.netcdf and - dataset.has_data() - ): - #add_entries(data_item.name) - MFPackage.add_netcdf_entries( - attrs, - self.model_name, - self.package_name, - dataset.structure, - auxnames, - mesh, - self.model_or_sim.modelgrid.nlay, - ) - - return attrs - - -class MFChildPackages: - """ - Behind the scenes code for creating an interface to access child packages - from a parent package. This class is automatically constructed by the - FloPy library and is for internal library use only. - - Parameters - ---------- - """ - - def __init__( - self, - model_or_sim, - parent, - pkg_type, - filerecord, - package=None, - package_class=None, - ): - self._packages = [] - self._filerecord = filerecord - if package is not None: - self._packages.append(package) - self._model_or_sim = model_or_sim - self._cpparent = parent - self._pkg_type = pkg_type - self._package_class = package_class - - def __init_subclass__(cls): - """Register package""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __getattr__(self, attr): - if ( - "_packages" in self.__dict__ - and len(self._packages) > 0 - and hasattr(self._packages[0], attr) - ): - item = getattr(self._packages[0], attr) - return item - raise AttributeError(attr) - - def __getitem__(self, k): - if isinstance(k, int): - if k < len(self._packages): - return self._packages[k] - raise ValueError(f"Package index {k} does not exist.") - - def __setattr__(self, key, value): - if ( - key != "_packages" - and key != "_model_or_sim" - and key != "_cpparent" - and key != "_inattr" - and key != "_filerecord" - and key != "_package_class" - and key != "_pkg_type" - ): - if len(self._packages) == 0: - raise Exception( - "No {} package is currently attached to package" - " {}. 
Use the initialize method to create a(n) " - "{} package before attempting to access its " - "properties.".format( - self._pkg_type, self._cpparent.filename, self._pkg_type - ) - ) - package = self._packages[0] - setattr(package, key, value) - return - super().__setattr__(key, value) - - def __default_file_path_base(self, file_path, suffix=""): - stem = os.path.split(file_path)[1] - stem_lst = stem.split(".") - file_name = ".".join(stem_lst[:-1]) - if len(stem_lst) > 1: - file_ext = stem_lst[-1] - return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" - elif suffix != "": - return f"{stem}.{self._pkg_type}" - else: - return f"{stem}.{suffix}.{self._pkg_type}" - - def __file_path_taken(self, possible_path): - for package in self._packages: - # Do case insensitive compare - if package.filename.lower() == possible_path.lower(): - return True - return False - - def next_default_file_path(self): - possible_path = self.__default_file_path_base(self._cpparent.filename) - suffix = 0 - while self.__file_path_taken(possible_path): - possible_path = self.__default_file_path_base( - self._cpparent.filename, suffix - ) - suffix += 1 - return possible_path - - def init_package(self, package, fname, remove_packages=True): - if remove_packages: - # clear out existing packages - self._remove_packages() - elif fname is not None: - self._remove_packages(fname) - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - # check file record variable - found = False - fr_data = self._filerecord.get_data() - if fr_data is not None: - for line in fr_data: - if line[0] == fname: - found = True - if not found: - # append file record variable - self._filerecord.append_data([(fname,)]) - # add the package to the list - self._packages.append(package) - - def _update_filename(self, old_fname, new_fname): - file_record = self._filerecord.get_data() - new_file_record_data = [] - if file_record is not None: - file_record_data = file_record[0] - for item in file_record_data: - base, fname = os.path.split(item) - if fname.lower() == old_fname.lower(): - if base: - new_file_record_data.append( - (os.path.join(base, new_fname),) - ) - else: - new_file_record_data.append((new_fname,)) - else: - new_file_record_data.append((item,)) - else: - new_file_record_data.append((new_fname,)) - self._filerecord.set_data(new_file_record_data) - - def _append_package(self, package, fname, update_frecord=True): - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - - if update_frecord: - # set file record variable - file_record = self._filerecord.get_data() - file_record_data = file_record - new_file_record_data = [] - for item in file_record_data: - new_file_record_data.append((item[0],)) - new_file_record_data.append((fname,)) - self._filerecord.set_data(new_file_record_data) - - for existing_pkg in self._packages: - if existing_pkg is package: - # do not add the same package twice - return - # add the package to the list - self._packages.append(package) - - def _remove_packages(self, fname=None, only_pop_from_list=False): - rp_list = [] - for idx, package in enumerate(self._packages): - if fname is None or package.filename == fname: - if not only_pop_from_list: - self._model_or_sim.remove_package(package) - rp_list.append(idx) - for idx in reversed(rp_list): - self._packages.pop(idx) diff --git a/flopy/mf6/tmp/mfmodel.py b/flopy/mf6/tmp/mfmodel.py deleted file mode 100644 index 4918b5e96b..0000000000 --- 
a/flopy/mf6/tmp/mfmodel.py +++ /dev/null @@ -1,2229 +0,0 @@ -import inspect -import os -import sys -import warnings -from typing import Optional, Union - -import numpy as np - -from ..discretization.grid import Grid -from ..discretization.modeltime import ModelTime -from ..discretization.structuredgrid import StructuredGrid -from ..discretization.unstructuredgrid import UnstructuredGrid -from ..discretization.vertexgrid import VertexGrid -from ..mbase import ModelInterface -from ..utils import datautil -from ..utils.check import mf6check -from .coordinates import modeldimensions -from .data import mfdata, mfdatalist, mfstructure -from .data.mfdatautil import DataSearchOutput, iterable -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .mfpackage import MFPackage -from .utils.mfenums import DiscretizationType -from .utils.output_util import MF6Output - - -class MFModel(ModelInterface): - """ - MODFLOW-6 model base class. Represents a single model in a simulation. - - Parameters - ---------- - simulation_data : MFSimulationData - Simulation data object of the simulation this model will belong to - structure : MFModelStructure - Structure of this type of model - modelname : str - Name of the model - model_nam_file : str - Relative path to the model name file from model working folder - version : str - Version of modflow - exe_name : str - Model executable name - model_ws : str - Model working folder path - disfile : str - Relative path to dis file from model working folder - grid_type : str - Type of grid the model will use (structured, unstructured, vertices) - verbose : bool - Verbose setting for model operations (default False) - - Attributes - ---------- - name : str - Name of the model - exe_name : str - Model executable name - packages : dict of MFPackage - Dictionary of model packages - - """ - - def __init__( - self, - simulation, - model_type="gwf6", - modelname="model", - model_nam_file=None, - version="mf6", - exe_name="mf6", - add_to_simulation=True, - structure=None, - model_rel_path=".", - verbose=False, - **kwargs, - ): - self._package_container = PackageContainer(simulation.simulation_data) - self.simulation = simulation - self.simulation_data = simulation.simulation_data - self.name = modelname - self.name_file = None - self._version = version - self.model_type = model_type - self.type = "Model" - - if model_nam_file is None: - model_nam_file = f"{modelname}.nam" - - if add_to_simulation: - self.structure = simulation.register_model( - self, model_type, modelname, model_nam_file - ) - else: - self.structure = structure - self.set_model_relative_path(model_rel_path) - self.exe_name = exe_name - self.dimensions = modeldimensions.ModelDimensions( - self.name, self.simulation_data - ) - self.simulation_data.model_dimensions[modelname] = self.dimensions - self._ftype_num_dict = {} - self._package_paths = {} - self._verbose = verbose - - if model_nam_file is None: - self.model_nam_file = f"{modelname}.nam" - else: - self.model_nam_file = model_nam_file - - # check for spatial reference info in kwargs - xll = kwargs.pop("xll", None) - yll = kwargs.pop("yll", None) - self._xul = kwargs.pop("xul", None) - self._yul = kwargs.pop("yul", None) - rotation = kwargs.pop("rotation", 0.0) - crs = kwargs.pop("crs", None) - # build model grid object - self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation) - - self.start_datetime = None - # check 
for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = ( - f'Extraneous kwargs "{kwargs_str}" provided to MFModel.' - ) - raise FlopyException(excpt_str) - - # build model name file - # create name file based on model type - support different model types - package_obj = PackageContainer.package_factory("nam", model_type[0:3]) - if not package_obj: - excpt_str = ( - f"Name file could not be found for model{model_type[0:3]}." - ) - raise FlopyException(excpt_str) - - self.name_file = package_obj( - self, - filename=self.model_nam_file, - pname=self.name, - _internal_package=True, - ) - - def __init_subclass__(cls): - """Register model type""" - super().__init_subclass__() - PackageContainer.modflow_models.append(cls) - PackageContainer.models_by_type[cls.model_type] = cls - - def __getattr__(self, item): - """ - __getattr__ - used to allow for getting packages as if they are - attributes - - Parameters - ---------- - item : str - 3 character package name (case insensitive) - - - Returns - ------- - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - """ - if item == "name_file" or not hasattr(self, "name_file"): - raise AttributeError(item) - - package = self.get_package(item) - if package is not None: - return package - raise AttributeError(item) - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.name, - package="", - ) - return - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - file_mgr = self.simulation_data.mfpath - data_str = ( - "name = {}\nmodel_type = {}\nversion = {}\nmodel_" - "relative_path = {}" - "\n\n".format( - self.name, - self.model_type, - self.version, - file_mgr.model_relative_path[self.name], - ) - ) - - for package in self.packagelist: - pk_str = package._get_data_str(formal, False) - if formal: - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - else: - pk_str = package._get_data_str(formal, False) - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - return data_str - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_dict(self): - """Returns a copy of the package name dictionary. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. 
deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - @property - def nper(self): - """Number of stress periods. - - Returns - ------- - nper : int - Number of stress periods in the simulation. - - """ - try: - return self.simulation.tdis.nper.array - except AttributeError: - return None - - @property - def modeltime(self): - """Model time discretization information. - - Returns - ------- - modeltime : ModelTime - FloPy object containing time discretization information for the - simulation. - - """ - tdis = self.simulation.get_package("tdis", type_only=True) - period_data = tdis.perioddata.get_data() - - # build steady state data - sto = self.get_package("sto", type_only=True) - if sto is None: - steady = np.full((len(period_data["perlen"])), True, dtype=bool) - else: - steady = np.full((len(period_data["perlen"])), False, dtype=bool) - ss_periods = sto.steady_state.get_active_key_dict() - for period, val in ss_periods.items(): - if val: - ss_periods[period] = sto.steady_state.get_data(period) - tr_periods = sto.transient.get_active_key_dict() - for period, val in tr_periods.items(): - if val: - tr_periods[period] = sto.transient.get_data(period) - if ss_periods: - last_ss_value = False - # loop through steady state array - for index, value in enumerate(steady): - # resolve if current index is steady state or transient - if index in ss_periods and ss_periods[index]: - last_ss_value = True - elif index in tr_periods and tr_periods[index]: - last_ss_value = False - if last_ss_value is True: - steady[index] = True - - # build model time - itmuni = tdis.time_units.get_data() - start_date_time = tdis.start_date_time.get_data() - - self._model_time = ModelTime( - perlen=period_data["perlen"], - nstp=period_data["nstp"], - tsmult=period_data["tsmult"], - time_units=itmuni, - start_datetime=start_date_time, - steady_state=steady - ) - return self._model_time - - @property - def modeldiscrit(self): - """Basic model spatial discretization information. This is used - internally prior to model spatial discretization information being - fully loaded. - - Returns - ------- - model grid : Grid subclass - FloPy object containing basic spatial discretization information - for the model. 
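-
-        Examples
-        --------
-        A minimal sketch (assuming model is a structured-grid model
-        being loaded):
-
-        >>> grid = model.modeldiscrit
-        >>> grid.nlay, grid.nrow, grid.ncol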
- - """ - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - return StructuredGrid( - nlay=dis.nlay.get_data(), - nrow=dis.nrow.get_data(), - ncol=dis.ncol.get_data(), - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - return VertexGrid( - ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data() - ) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - nodes = dis.nodes.get_data() - ncpl = np.array([nodes], dtype=int) - return UnstructuredGrid(ncpl=ncpl) - - @property - def modelgrid(self): - """Model spatial discretization information. - - Returns - ------- - model grid : Grid subclass - FloPy object containing spatial discretization information for the - model. - - """ - force_resync = False - if not self._mg_resync: - return self._modelgrid - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - if not hasattr(dis, "_init_complete"): - # disu package has not yet been fully initialized - return self._modelgrid - - # check to see if ncpl can be constructed from ihc array, - # otherwise set ncpl equal to [nodes] - ihc = dis.ihc.array - iac = dis.iac.array - ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) - if ncpl is None: - ncpl = np.array([dis.nodes.get_data()], dtype=int) - cell2d = dis.cell2d.array - idomain = dis.idomain.array - if idomain is None: - idomain = np.ones(dis.nodes.array, dtype=int) - if cell2d is None: - if ( - 
self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: cell2d information missing. Functionality of " - "the UnstructuredGrid will be limited." - ) - - vertices = dis.vertices.array - if vertices is None: - if ( - self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: vertices information missing. Functionality " - "of the UnstructuredGrid will be limited." - ) - vertices = None - else: - vertices = np.array(vertices) - - self._modelgrid = UnstructuredGrid( - vertices=vertices, - cell2d=cell2d, - top=dis.top.array, - botm=dis.bot.array, - idomain=idomain, - lenuni=dis.length_units.array, - ncpl=ncpl, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - iac=dis.iac.array, - ja=dis.ja.array, - ) - elif self.get_grid_type() == DiscretizationType.DISV1D: - dis = self.get_package("disv1d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell1d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DIS2D: - dis = self.get_package("dis2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV2D: - dis = self.get_package("disv2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain 
is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - return self._modelgrid - - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - - # resolve offsets - if xorig is None: - xorig = self._modelgrid.xoffset - if xorig is None: - if self._xul is not None: - xorig = self._modelgrid._xul_to_xll(self._xul) - else: - xorig = 0.0 - if yorig is None: - yorig = self._modelgrid.yoffset - if yorig is None: - if self._yul is not None: - yorig = self._modelgrid._yul_to_yll(self._yul) - else: - yorig = 0.0 - if angrot is None: - angrot = self._modelgrid.angrot - self._modelgrid.set_coord_info( - xorig, - yorig, - angrot, - self._modelgrid.crs, - ) - self._mg_resync = not self._modelgrid.is_complete or force_resync - return self._modelgrid - - @property - def packagelist(self): - """List of model packages.""" - return self._package_container.packagelist - - @property - def namefile(self): - """Model namefile object.""" - return self.model_nam_file - - @property - def model_ws(self): - """Model file path.""" - file_mgr = self.simulation_data.mfpath - return file_mgr.get_model_path(self.name) - - @property - def exename(self): - """MODFLOW executable name""" - return self.exe_name - - @property - def version(self): - """Version of MODFLOW""" - return self._version - - @property - def solver_tols(self): - """Returns the solver inner hclose and rclose values. - - Returns - ------- - inner_hclose, rclose : float, float - - """ - ims = self.get_ims_package() - if ims is not None: - rclose = ims.rcloserecord.get_data() - if rclose is not None: - rclose = rclose[0][0] - return ims.inner_hclose.get_data(), rclose - return None - - @property - def laytyp(self): - """Layering type""" - try: - return self.npf.icelltype.array - except AttributeError: - return None - - @property - def hdry(self): - """Dry cell value""" - return -1e30 - - @property - def hnoflo(self): - """No-flow cell value""" - return 1e30 - - @property - def laycbd(self): - """Quasi-3D confining bed. Not supported in MODFLOW-6. - - Returns - ------- - None : None - - """ - return None - - @property - def output(self): - budgetkey = None - if self.model_type == "gwt6": - budgetkey = "MASS BUDGET FOR ENTIRE MODEL" - try: - return MF6Output(self.oc, budgetkey=budgetkey) - except AttributeError: - return MF6Output(self, budgetkey=budgetkey) - - def export(self, f, **kwargs): - """Method to export a model to a shapefile or netcdf file - - Parameters - ---------- - f : str - File name (".nc" for netcdf or ".shp" for shapefile) - or dictionary of .... 
-        **kwargs : keyword arguments
-            modelgrid: flopy.discretization.Grid
-                User supplied modelgrid object which will supersede the built
-                in modelgrid object
-            if fmt is set to 'vtk', parameters of Vtk initializer
-
-        """
-        from ..export import utils
-
-        return utils.model_export(f, self, **kwargs)
-
-    def netcdf_attrs(self, mesh=None):
-        """Returns a dictionary of dataset (model) scoped attributes.
-
-        Parameters
-        ----------
-        mesh : str
-            mesh type if dataset is ugrid compliant
-        """
-        attrs = {
-            "modflow_grid": "",
-            "modflow_model": "",
-        }
-        if self.get_grid_type() == DiscretizationType.DIS:
-            attrs["modflow_grid"] = "STRUCTURED"
-        elif self.get_grid_type() == DiscretizationType.DISV:
-            attrs["modflow_grid"] = "VERTEX"
-
-        attrs["modflow_model"] = (
-            f"{self.name.upper()}: MODFLOW 6 "
-            f"{self.model_type.upper()[0:3]} model"
-        )
-
-        # supported mesh type: LAYERED
-        if mesh:
-            attrs["mesh"] = mesh
-
-        return attrs
-
-    @property
-    def verbose(self):
-        """Verbose setting for model operations (True/False)"""
-        return self._verbose
-
-    @verbose.setter
-    def verbose(self, verbose):
-        """Verbose setting for model operations (True/False)"""
-        self._verbose = verbose
-
-    def check(self, f=None, verbose=True, level=1):
-        """
-        Check model data for common errors.
-
-        Warning
-        -------
-        The MF6 check mechanism is deprecated pending reimplementation
-        in a future release. While the checks API will remain in place
-        through 3.x, it may be unstable, and will likely change in 4.x.
-
-        Parameters
-        ----------
-        f : str or file handle
-            String defining file name or file handle for summary file
-            of check method output. If a string is passed a file handle
-            is created. If f is None, check method does not write
-            results to a summary file. (default is None)
-        verbose : bool
-            Boolean flag used to determine if check method results are
-            written to the screen
-        level : int
-            Check method analysis level. If level=0, summary checks are
-            performed. If level=1, full checks are performed.
-
-        Returns
-        -------
-        success : bool
-
-        Examples
-        --------
-
-        >>> import flopy
-        >>> m = flopy.modflow.Modflow.load('model.nam')
-        >>> m.check()
-        """
-
-        # check instance for model-level check
-        chk = mf6check(self, f=f, verbose=verbose, level=level)
-
-        return self._check(chk, level)
-
-    @staticmethod
-    def load_base(
-        cls_child,
-        simulation,
-        structure,
-        modelname="NewModel",
-        model_nam_file="modflowtest.nam",
-        mtype="gwf",
-        version="mf6",
-        exe_name: Union[str, os.PathLike] = "mf6",
-        strict=True,
-        model_rel_path=os.curdir,
-        load_only=None,
-    ):
-        """
-        Class method that loads an existing model.
-
-        Parameters
-        ----------
-        simulation : MFSimulation
-            simulation object that this model is a part of
-        simulation_data : MFSimulationData
-            simulation data object
-        structure : MFModelStructure
-            structure of this type of model
-        model_name : str
-            name of the model
-        model_nam_file : str
-            relative path to the model name file from model working folder
-        version : str
-            version of modflow
-        exe_name : str or PathLike
-            model executable name or path
-        strict : bool
-            strict mode when loading files
-        model_rel_path : str
-            relative path of model folder to simulation folder
-        load_only : list
-            list of package abbreviations or package names corresponding to
-            packages that flopy will load. default is None, which loads all
-            packages. the discretization packages will load regardless of this
-            setting. subpackages, like time series and observations, will also
-            load regardless of this setting.
- example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1'] - - Returns - ------- - model : MFModel - - Examples - -------- - """ - instance = cls_child( - simulation, - modelname, - model_nam_file=model_nam_file, - version=version, - exe_name=exe_name, - add_to_simulation=False, - structure=structure, - model_rel_path=model_rel_path, - ) - - # build case consistent load_only dictionary for quick lookups - load_only = PackageContainer._load_only_dict(load_only) - - # load name file - instance.name_file.load(strict) - - # order packages - vnum = mfstructure.MFStructure().get_version_string() - # FIX: Transport - Priority packages maybe should not be hard coded - priority_packages = { - f"dis{vnum}": 1, - f"disv{vnum}": 1, - f"disu{vnum}": 1, - } - packages_ordered = [] - package_recarray = instance.simulation_data.mfdata[ - (modelname, "nam", "packages", "packages") - ] - if package_recarray.array is None: - return instance - - for item in package_recarray.get_data(): - if item[0] in priority_packages: - packages_ordered.insert(0, (item[0], item[1], item[2])) - else: - packages_ordered.append((item[0], item[1], item[2])) - - # load packages - sim_struct = mfstructure.MFStructure().sim_struct - instance._ftype_num_dict = {} - for ftype, fname, pname in packages_ordered: - ftype_orig = ftype - ftype = ftype[0:-1].lower() - if ( - ftype in structure.package_struct_objs - or ftype in sim_struct.utl_struct_objs - ): - if ( - load_only is not None - and not PackageContainer._in_pkg_list( - priority_packages, ftype_orig, pname - ) - and not PackageContainer._in_pkg_list( - load_only, ftype_orig, pname - ) - ): - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" skipping package {ftype}...") - continue - if model_rel_path and model_rel_path != ".": - # strip off model relative path from the file path - filemgr = simulation.simulation_data.mfpath - fname = filemgr.strip_model_relative_path(modelname, fname) - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" loading package {ftype}...") - # load package - instance.load_package(ftype, fname, pname, strict, None) - sim_data = simulation.simulation_data - if ftype == "dis" and not sim_data.max_columns_user_set: - # set column wrap to ncol - dis = instance.get_package("dis", type_only=True) - if dis is not None and hasattr(dis, "ncol"): - sim_data.max_columns_of_data = dis.ncol.get_data() - sim_data.max_columns_user_set = False - sim_data.max_columns_auto_set = True - # load referenced packages - if modelname in instance.simulation_data.referenced_files: - for ref_file in instance.simulation_data.referenced_files[ - modelname - ].values(): - if ( - ref_file.file_type in structure.package_struct_objs - or ref_file.file_type in sim_struct.utl_struct_objs - ) and not ref_file.loaded: - instance.load_package( - ref_file.file_type, - ref_file.file_name, - None, - strict, - ref_file.reference_path, - ) - ref_file.loaded = True - - # TODO: fix jagged lists where appropriate - - return instance - - def inspect_cells( - self, - cell_list, - stress_period=None, - output_file_path=None, - inspect_budget=True, - inspect_dependent_var=True, - ): - """ - Inspect model cells. Returns model data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data qnly return data from this stress period. 
If - not specified or None, all stress period data will be returned. - output_file_path: str - Path to output file that will contain the inspection results - inspect_budget: bool - Inspect budget file - inspect_dependent_var: bool - Inspect head file - Returns - ------- - output : dict - Dictionary containing inspection results - - Examples - -------- - - >>> import flopy - >>> sim = flopy.mf6.MFSimulationBase.load("name", "mf6", "mf6", ".") - >>> model = sim.get_model() - >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)] - >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv") - >>> model.inspect_cells(inspect_list, output_file_path=out_file) - """ - # handle no cell case - if cell_list is None or len(cell_list) == 0: - return None - - output_by_package = {} - # loop through all packages - for pp in self.packagelist: - # call the package's "inspect_cells" method - package_output = pp.inspect_cells(cell_list, stress_period) - if len(package_output) > 0: - output_by_package[f"{pp.package_name} package"] = ( - package_output - ) - # get dependent variable - if inspect_dependent_var: - try: - if self.model_type == "gwf6": - heads = self.output.head() - name = "heads" - elif self.model_type == "gwt6": - heads = self.output.concentration() - name = "concentration" - else: - inspect_dependent_var = False - except Exception: - inspect_dependent_var = False - if inspect_dependent_var and heads is not None: - kstp_kper_lst = heads.get_kstpkper() - data_output = DataSearchOutput((name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if stress_period is not None and stress_period != kstp_kper[1]: - continue - head_array = np.array(heads.get_data(kstpkper=kstp_kper)) - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2: - head_array = head_array[0, :, :] - elif len(cell_list[0]) == 1: - head_array = head_array[0, 0, :] - # find data matches - self.match_array_cells( - cell_list, - head_array.shape, - head_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - output_by_package[f"{name} output"] = [data_output] - - # get model dimensions - model_shape = self.modelgrid.shape - - # get budgets - if inspect_budget: - try: - bud = self.output.budget() - except Exception: - inspect_budget = False - if inspect_budget and bud is not None: - kstp_kper_lst = bud.get_kstpkper() - rec_names = bud.get_unique_record_names() - budget_matches = [] - for rec_name in rec_names: - # clean up binary string name - string_name = str(rec_name)[3:-1].strip() - data_output = DataSearchOutput((string_name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if ( - stress_period is not None - and stress_period != kstp_kper[1] - ): - continue - budget_array = np.array( - bud.get_data( - kstpkper=kstp_kper, - text=rec_name, - full3D=True, - )[0] - ) - if len(budget_array.shape) == 4: - # get rid of 4th "time" dimension - budget_array = budget_array[0, :, :, :] - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3: - budget_array = budget_array[0, :, :] - elif ( - len(cell_list[0]) == 1 and len(budget_array.shape) >= 2 - ): - budget_array = budget_array[0, :] - # find data matches - if budget_array.shape != model_shape: - # no support yet for different shaped budgets like - # flow_ja_face - continue - - self.match_array_cells( - cell_list, - budget_array.shape, - budget_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - budget_matches.append(data_output) - if 
len(budget_matches) > 0: - output_by_package["budget output"] = budget_matches - - if len(output_by_package) > 0 and output_file_path is not None: - with open(output_file_path, "w") as fd: - # write document header - fd.write(f"Inspect cell results for model {self.name}\n") - output = [] - for cell in cell_list: - output.append(" ".join([str(i) for i in cell])) - output = ",".join(output) - fd.write(f"Model cells inspected,{output}\n\n") - - for package_name, matches in output_by_package.items(): - fd.write(f"Results from {package_name}\n") - for search_output in matches: - # write header line with data name - fd.write( - f",Results from " - f"{search_output.path_to_data[-1]}\n" - ) - # write data header - if search_output.transient: - if search_output.output: - fd.write(",stress_period,time_step") - else: - fd.write(",stress_period/key") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - fd.write(",cellid") - h_columns = ",".join(search_output.data_header) - fd.write(f",{h_columns}\n") - else: - fd.write(",cellid,data\n") - # write data found - for index, data_entry in enumerate( - search_output.data_entries - ): - if search_output.transient: - sp = search_output.data_entry_stress_period[ - index - ] - if search_output.output: - fd.write(f",{sp[1]},{sp[0]}") - else: - fd.write(f",{sp}") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - cells = search_output.data_entry_cellids[ - index - ] - output = " ".join([str(i) for i in cells]) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - else: - output = " ".join( - [ - str(i) - for i in search_output.data_entry_ids[ - index - ] - ] - ) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - fd.write("\n") - return output_by_package - - def match_array_cells( - self, cell_list, data_shape, array_data, key, data_output - ): - # loop through list of cells we are searching for - for cell in cell_list: - if len(data_shape) == 3 or data_shape[0] == "nodes": - # data is by cell - if array_data.ndim == 3 and len(cell) == 3: - data_output.data_entries.append( - array_data[cell[0], cell[1], cell[2]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append( - array_data[cell[0], cell[1]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: CellID "{}" not same ' - "number of dimensions as data " - "{}.".format(cell, data_output.path_to_data) - ) - print(warning_str) - elif len(data_shape) == 2: - # get data based on ncpl/lay - if array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append( - array_data[cell[0], cell[1]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif len(data_shape) == 1: - # get data based on nodes - if len(cell) == 1 and array_data.ndim == 1: - 
data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - - @staticmethod - def _format_data_entry(data_entry): - output = "" - if iterable(data_entry, True): - for item in data_entry: - if isinstance(item, tuple): - formatted = " ".join([str(i) for i in item]) - output = f"{output},{formatted}" - else: - output = f"{output},{item}" - return f"{output}\n" - else: - return f",{data_entry}\n" - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """ - Writes out model's package files. - - Parameters - ---------- - ext_file_action : ExtFileAction - Defines what to do with external files when the simulation path has - changed. defaults to copy_relative_paths which copies only files - with relative paths, leaving files defined by absolute paths fixed. - - """ - - # write name file - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(" writing model name file...") - - self.name_file.write(ext_file_action=ext_file_action) - - if not self.simulation_data.max_columns_user_set: - grid_type = self.get_grid_type() - if grid_type == DiscretizationType.DIS: - self.simulation_data.max_columns_of_data = self.dis.ncol.get_data() - self.simulation_data.max_columns_user_set = False - self.simulation_data.max_columns_auto_set = True - - # write packages - for pp in self.packagelist: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" writing package {pp._get_pname()}...") - pp.write(ext_file_action=ext_file_action) - - def get_grid_type(self): - """ - Return the type of grid used by model 'model_name' in simulation - containing simulation data 'simulation_data'. - - Returns - ------- - grid type : DiscretizationType - """ - package_recarray = self.name_file.packages - structure = mfstructure.MFStructure() - if ( - package_recarray.search_data( - f"dis{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DIS - elif ( - package_recarray.search_data( - f"disv{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV - elif ( - package_recarray.search_data( - f"disu{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISU - elif ( - package_recarray.search_data( - f"disv1d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV1D - elif ( - package_recarray.search_data( - f"dis2d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DIS2D - elif ( - package_recarray.search_data( - f"disv2d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV2D - - return DiscretizationType.UNDEFINED - - def get_ims_package(self): - """Get the IMS package associated with this model. - - Returns - ------- - IMS package : ModflowIms - """ - solution_group = self.simulation.name_file.solutiongroup.get_data(0) - for record in solution_group: - for name in record.dtype.names: - if name == "slntype" or name == "slnfname": - continue - if record[name] == self.name: - return self.simulation.get_solution_package( - record.slnfname - ) - return None - - def get_steadystate_list(self): - """Returns a list of stress periods that are steady state. 
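# A minimal sketch of the grid-type and solver lookups defined above,
# assuming gwf is a loaded flopy.mf6.ModflowGwf instance; the import path
# for DiscretizationType is an assumption based on this module's imports.
from flopy.mf6.data.mfstructure import DiscretizationType

if gwf.get_grid_type() == DiscretizationType.DIS:
    print("structured (DIS) grid")
ims = gwf.get_ims_package()  # the ModflowIms registered for this model, or None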
- - Returns - ------- - steady state list : list - - """ - ss_list = [] - tdis = self.simulation.get_package("tdis") - period_data = tdis.perioddata.get_data() - index = 0 - pd_len = len(period_data) - while index < pd_len: - ss_list.append(True) - index += 1 - - storage = self.get_package("sto", type_only=True) - if storage is not None: - tr_keys = storage.transient.get_keys(True) - ss_keys = storage.steady_state.get_keys(True) - for key in tr_keys: - ss_list[key] = False - for ss_list_key in range(key + 1, len(ss_list)): - for ss_key in ss_keys: - if ss_key == ss_list_key: - break - ss_list[key] = False - return ss_list - - def is_valid(self): - """ - Checks the validity of the model and all of its packages - - Returns - ------- - valid : bool - - """ - - # valid name file - if not self.name_file.is_valid(): - return False - - # valid packages - for pp in self.packagelist: - if not pp.is_valid(): - return False - - # required packages exist - for package_struct in self.structure.package_struct_objs.values(): - if ( - not package_struct.optional - and package_struct.file_type - not in self._package_container.package_type_dict - ): - return False - - return True - - def set_model_relative_path(self, model_ws): - """ - Sets the file path to the model folder relative to the simulation - folder and updates all model file paths, placing them in the model - folder. - - Parameters - ---------- - model_ws : str - Model working folder relative to simulation working folder - - """ - # set all data internal - self.set_all_data_internal(False) - - # update path in the file manager - file_mgr = self.simulation_data.mfpath - file_mgr.set_last_accessed_model_path() - path = model_ws - file_mgr.model_relative_path[self.name] = path - - if ( - model_ws - and model_ws != "." 
- and self.simulation.name_file is not None - ): - model_folder_path = file_mgr.get_model_path(self.name) - if not os.path.exists(model_folder_path): - # make new model folder - os.makedirs(model_folder_path) - # update model name file location in simulation name file - models = self.simulation.name_file.models - models_data = models.get_data() - for index, entry in enumerate(models_data): - old_model_file_name = os.path.split(entry[1])[1] - old_model_base_name = os.path.splitext(old_model_file_name)[0] - if ( - old_model_base_name.lower() == self.name.lower() - or self.name == entry[2] - ): - models_data[index][1] = os.path.join( - path, old_model_file_name - ) - break - models.set_data(models_data) - - if self.name_file is not None: - # update listing file location in model name file - list_file = self.name_file.list.get_data() - if list_file: - path, list_file_name = os.path.split(list_file) - try: - self.name_file.list.set_data( - os.path.join(path, list_file_name) - ) - except MFDataException as mfde: - message = ( - "Error occurred while setting relative " - 'path "{}" in model ' - '"{}".'.format( - os.path.join(path, list_file_name), self.name - ) - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - # update package file locations in model name file - packages = self.name_file.packages - packages_data = packages.get_data() - if packages_data is not None: - for index, entry in enumerate(packages_data): - # get package object associated with entry - package = None - if len(entry) >= 3: - package = self.get_package(entry[2]) - if package is None: - package = self.get_package(entry[0]) - if package is not None: - # combine model relative path with package path - packages_data[index][1] = os.path.join( - path, package.filename - ) - else: - # package not found, create path based on - # information in name file - old_package_name = os.path.split(entry[1])[-1] - packages_data[index][1] = os.path.join( - path, old_package_name - ) - packages.set_data(packages_data) - # update files referenced from within packages - for package in self.packagelist: - package.set_model_relative_path(model_ws) - - def _remove_package_from_dictionaries(self, package): - # remove package from local dictionaries and lists - if package.path in self._package_paths: - del self._package_paths[package.path] - self._package_container.remove_package(package) - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. - type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def remove_package(self, package_name): - """ - Removes package and all child packages from the model. - `package_name` can be the package's name, type, or package object to - be removed from the model. - - Parameters - ---------- - package_name : str - Package name, package type, or package object to be removed from - the model. 
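# A minimal sketch of package lookup and removal as documented above; "npf"
# and "ghb" are package types from the tutorial model, used illustratively.
npf = gwf.get_package("npf", type_only=True)  # search by package type
gwf.remove_package("ghb")  # also removes the GHB package's child packages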
- - """ - if isinstance(package_name, MFPackage): - packages = [package_name] - else: - packages = self.get_package(package_name) - if not isinstance(packages, list) and packages is not None: - packages = [packages] - if packages is None: - return - for package in packages: - if package.model_or_sim.name != self.name: - except_text = ( - "Package can not be removed from model " - "{self.model_name} since it is not part of it." - ) - raise mfstructure.FlopyException(except_text) - - self._remove_package_from_dictionaries(package) - - try: - # remove package from name file - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while reading package names " - "from name file in model " - f'"{self.name}"' - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - try: - new_rec_array = None - for item in package_data: - filename = os.path.basename(item[1]) - if filename != package.filename: - if new_rec_array is None: - new_rec_array = np.rec.array( - [item.tolist()], package_data.dtype - ) - else: - new_rec_array = np.hstack((item, new_rec_array)) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - "building package recarray", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - None, - self.simulation_data.debug, - ) - try: - self.name_file.packages.set_data(new_rec_array) - except MFDataException as mfde: - message = ( - "Error occurred while setting package names " - f'from name file in model "{self.name}". Package name ' - f"data:\n{new_rec_array}" - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - - # build list of child packages - child_package_list = [] - for pkg in self.packagelist: - if ( - pkg.parent_file is not None - and pkg.parent_file.path == package.path - ): - child_package_list.append(pkg) - # remove child packages - for child_package in child_package_list: - self._remove_package_from_dictionaries(child_package) - - def update_package_filename(self, package, new_name): - """ - Updates the filename for a package. For internal flopy use only. - - Parameters - ---------- - package : MFPackage - Package object - new_name : str - New package name - """ - try: - # get namefile package data - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while updating package names " - "from name file in model " - f'"{self.name}".' 
- ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - try: - file_mgr = self.simulation_data.mfpath - model_rel_path = file_mgr.model_relative_path[self.name] - # update namefile package data with new name - new_rec_array = None - old_leaf = os.path.split(package.filename)[1] - for item in package_data: - leaf = os.path.split(item[1])[1] - if leaf == old_leaf: - item[1] = os.path.join(model_rel_path, new_name) - - if new_rec_array is None: - new_rec_array = np.rec.array( - [item.tolist()], package_data.dtype - ) - else: - new_rec_array = np.hstack((item, new_rec_array)) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - "updating package filename", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - None, - self.simulation_data.debug, - ) - try: - self.name_file.packages.set_data(new_rec_array) - except MFDataException as mfde: - message = ( - "Error occurred while updating package names " - f'from name file in model "{self.name}". Package name ' - f"data:\n{new_rec_array}" - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - - def rename_all_packages(self, name): - """Renames all package files in the model. - - Parameters - ---------- - name : str - Prefix of package names. Package files will be named - <name>.<package ext>. - - """ - nam_filename = f"{name}.nam" - self.simulation.rename_model_namefile(self, nam_filename) - self.name_file.filename = nam_filename - self.model_nam_file = nam_filename - package_type_count = {} - for package in self.packagelist: - if package.package_type not in package_type_count: - base_filename, leaf = os.path.split(package.filename) - lleaf = leaf.split(".") - if len(lleaf) > 1: - # keep existing extension - ext = lleaf[-1] - else: - # no extension found, create a new one - ext = package.package_type - new_fileleaf = f"{name}.{ext}" - if base_filename != "": - package.filename = os.path.join( - base_filename, new_fileleaf - ) - else: - package.filename = new_fileleaf - package_type_count[package.package_type] = 1 - else: - package_type_count[package.package_type] += 1 - package.filename = "{}_{}.{}".format( - name, - package_type_count[package.package_type], - package.package_type, - ) - - def set_all_data_external( - self, - check_data=True, - external_data_folder=None, - base_name=None, - binary=False, - ): - """Sets the model's list and array data to be stored externally. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - check_data : bool - Determines if data error checking is enabled during this - process. - external_data_folder - Folder, relative to the simulation path or model relative path - (see use_model_relative_path parameter), where external data - will be stored - base_name: str - Base file name prefix for all files - binary: bool - Whether file will be stored as binary - - """ - for package in self.packagelist: - package.set_all_data_external( - check_data, - external_data_folder, - base_name, - binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the model's list and array data to be stored internally.
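# A minimal sketch of round-tripping model data between external files and
# internal storage with the two methods above; the folder and prefix names
# are illustrative.
gwf.set_all_data_external(external_data_folder="arrays", base_name="uzf01")
gwf.set_all_data_internal(check_data=True)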
- - Parameters - ---------- - check_data : bool - Determines if data error checking is enabled during this - process. - - """ - for package in self.packagelist: - package.set_all_data_internal(check_data) - - def register_package( - self, - package, - add_to_package_list=True, - set_package_name=True, - set_package_filename=True, - ): - """ - Registers a package with the model. This method is used internally - by FloPy and is not intended for use by the end user. - - Parameters - ---------- - package : MFPackage - Package to register - add_to_package_list : bool - Add package to lookup list - set_package_name : bool - Produce a package name for this package - set_package_filename : bool - Produce a filename for this package - - Returns - ------- - path, package structure : tuple, MFPackageStructure - - """ - package.container_type = [PackageContainerType.model] - if package.parent_file is not None: - path = package.parent_file.path + (package.package_type,) - else: - path = (self.name, package.package_type) - package_struct = self.structure.get_package_struct( - package.package_type - ) - if add_to_package_list and path in self._package_paths: - if ( - package_struct is not None - and not package_struct.multi_package_support - and not isinstance(package.parent_file, MFPackage) - ): - # package of this type already exists, replace it - self.remove_package(package.package_type) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with type {} already exists. " - "Replacing existing package" - ".".format(package.package_type) - ) - elif ( - not set_package_name - and package.package_name - in self._package_container.package_name_dict - ): - # package of this type with this name already - # exists, replace it - self.remove_package( - self._package_container.package_name_dict[ - package.package_name - ] - ) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with name {} already exists. " - "Replacing existing package" - ".".format(package.package_name) - ) - - # make sure path is unique - if path in self._package_paths: - path_iter = datautil.PathIter(path) - for new_path in path_iter: - if new_path not in self._package_paths: - path = new_path - break - self._package_paths[path] = 1 - - if package.package_type.lower() == "nam": - if not package.internal_package: - excpt_str = ( - "Unable to register nam file. Do not create your own nam " - "files. Nam files are automatically created and managed " - "for you by FloPy." 
- ) - print(excpt_str) - raise FlopyException(excpt_str) - - return path, self.structure.name_file_struct_obj - - package_extension = package.package_type - if set_package_name: - # produce a default package name - if ( - package_struct is not None - and package_struct.multi_package_support - ): - # check for other registered packages of this type - name_iter = datautil.NameIter(package.package_type, False) - for package_name in name_iter: - if ( - package_name - not in self._package_container.package_name_dict - ): - package.package_name = package_name - suffix = package_name.split("_") - if ( - len(suffix) > 1 - and datautil.DatumUtil.is_int(suffix[-1]) - and suffix[-1] != "0" - ): - # update file extension to make unique - package_extension = ( - f"{package_extension}_{suffix[-1]}" - ) - break - else: - package.package_name = package.package_type - - if set_package_filename: - # filename uses model base name - package._filename = f"{self.name}.{package.package_type}" - if ( - package._filename - in self._package_container.package_filename_dict - ): - # auto generate a unique file name and register it - file_name = MFFileMgmt.unique_file_name( - package._filename, - self._package_container.package_filename_dict, - ) - package._filename = file_name - - if add_to_package_list: - self._package_container.add_package(package) - - # add obs file to name file if it does not have a parent - if package.package_type in self.structure.package_struct_objs or ( - package.package_type == "obs" and package.parent_file is None - ): - # update model name file - pkg_type = package.package_type.upper() - if ( - package.package_type != "obs" and - self.structure.package_struct_objs[ - package.package_type - ].read_as_arrays - ): - pkg_type = pkg_type[0:-1] - # Model Assumption - assuming all name files have a package - # recarray - file_mgr = self.simulation_data.mfpath - model_rel_path = file_mgr.model_relative_path[self.name] - if model_rel_path != ".": - package_rel_path = os.path.join( - model_rel_path, package.filename - ) - else: - package_rel_path = package.filename - self.name_file.packages.update_record( - [ - f"{pkg_type}6", - package_rel_path, - package.package_name, - ], - 0, - ) - if package_struct is not None: - return (path, package_struct) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Unable to register unsupported file type {} " - "for model {}.".format(package.package_type, self.name) - ) - return None, None - - def load_package( - self, - ftype, - fname, - pname, - strict, - ref_path, - dict_package_name=None, - parent_package: Optional[MFPackage] = None, - ): - """ - Loads a package from a file. This method is used internally by FloPy - and is not intended for the end user. - - Parameters - ---------- - ftype : str - the file type - fname : str - the name of the file containing the package input - pname : str - the user-defined name for the package - strict : bool - strict mode when loading the file - ref_path : str - path to the file. 
uses local path if set to None - dict_package_name : str - package name for dictionary lookup - parent_package : MFPackage - parent package - - Examples - -------- - """ - if ref_path is not None: - fname = os.path.join(ref_path, fname) - sim_struct = mfstructure.MFStructure().sim_struct - if ( - ftype in self.structure.package_struct_objs - and self.structure.package_struct_objs[ftype].multi_package_support - ) or ( - ftype in sim_struct.utl_struct_objs - and sim_struct.utl_struct_objs[ftype].multi_package_support - ): - # resolve dictionary name for package - if dict_package_name is not None: - if parent_package is not None: - dict_package_name = f"{parent_package.path[-1]}_{ftype}" - else: - # use dict_package_name as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[dict_package_name] += 1 - else: - self._ftype_num_dict[dict_package_name] = 0 - dict_package_name = "{}_{}".format( - dict_package_name, - self._ftype_num_dict[dict_package_name], - ) - else: - # use ftype as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[ftype] += 1 - else: - self._ftype_num_dict[ftype] = 1 - if pname is not None: - dict_package_name = pname - else: - dict_package_name = ( - f"{ftype}-{self._ftype_num_dict[ftype]}" - ) - else: - dict_package_name = ftype - - # clean up model type text - model_type = self.structure.model_type - while datautil.DatumUtil.is_int(model_type[-1]): - model_type = model_type[0:-1] - - # create package - package_obj = PackageContainer.package_factory(ftype, model_type) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - try: - package.load(strict) - except ReadAsArraysException: - # create ReadAsArrays package and load it instead - package_obj = PackageContainer.package_factory( - f"{ftype}a", model_type - ) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - package.load(strict) - - # register child package with the model - self._package_container.add_package(package) - if parent_package is not None: - # register child package with the parent package - parent_package.add_package(package) - - return package - - def plot(self, SelPackList=None, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Args: - model: Flopy model instance - SelPackList: (list) list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns: - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
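# A minimal sketch of the plot() helper documented above; requires
# matplotlib, and the package list and layer number are illustrative.
axes = gwf.plot(SelPackList=["DIS", "NPF"], mflay=0)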
- """ - from ..plot.plotutil import PlotUtilities - - axes = PlotUtilities._plot_model_helper( - self, SelPackList=SelPackList, **kwargs - ) - - return axes - - @staticmethod - def _resolve_idomain(idomain, botm): - if idomain is None: - if botm is None: - return idomain - else: - return np.ones_like(botm) - return idomain diff --git a/flopy/mf6/tmp/mfpackage.py b/flopy/mf6/tmp/mfpackage.py deleted file mode 100644 index 8ab096f79a..0000000000 --- a/flopy/mf6/tmp/mfpackage.py +++ /dev/null @@ -1,3666 +0,0 @@ -import copy -import datetime -import errno -import inspect -import os -import sys -import warnings - -import numpy as np - -from ..mbase import ModelInterface -from ..pakbase import PackageInterface -from ..utils import datautil -from ..utils.check import mf6check -from ..version import __version__ -from .coordinates import modeldimensions -from .data import ( - mfdata, - mfdataarray, - mfdatalist, - mfdataplist, - mfdatascalar, - mfstructure, -) -from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal -from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - MFInvalidTransientBlockHeaderException, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .utils.output_util import MF6Output - - -class MFBlockHeader: - """ - Represents the header of a block in a MF6 input file. This class is used - internally by FloPy and its direct use by a user of this library is not - recommend. - - Parameters - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - - Attributes - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - data_items : list - List of MFVariable of the variables contained in this block - - """ - - def __init__( - self, - name, - variable_strings, - comment, - simulation_data=None, - path=None, - block=None, - ): - self.name = name - self.variable_strings = variable_strings - self.block = block - if not ( - (simulation_data is None and path is None) - or (simulation_data is not None and path is not None) - ): - raise FlopyException( - "Block header must be initialized with both " - "simulation_data and path or with neither." 
- ) - if simulation_data is None: - self.comment = comment - self.simulation_data = None - self.path = path - self.comment_path = None - else: - self.connect_to_dict(simulation_data, path, comment) - # TODO: Get data_items from dictionary - self.data_items = [] - # build block comment paths - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - if isinstance(path, list): - path = tuple(path) - if path is not None: - self.blk_trailing_comment_path = path + ( - name, - "blk_trailing_comment", - ) - self.blk_post_comment_path = path + ( - name, - "blk_post_comment", - ) - if self.blk_trailing_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_trailing_comment_path] = ( - MFComment("", "", simulation_data, 0) - ) - if self.blk_post_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_post_comment_path] = MFComment( - "\n", "", simulation_data, 0 - ) - else: - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - - def __lt__(self, other): - transient_key = self.get_transient_key() - if transient_key is None: - return True - else: - other_key = other.get_transient_key() - if other_key is None: - return False - else: - return transient_key < other_key - - def build_header_variables( - self, - simulation_data, - block_header_structure, - block_path, - data, - dimensions, - ): - """Builds data objects to hold header variables.""" - self.data_items = [] - var_path = block_path + (block_header_structure[0].name,) - - # fix up data - fixed_data = [] - if ( - block_header_structure[0].data_item_structures[0].type - == DatumType.keyword - ): - data_item = block_header_structure[0].data_item_structures[0] - fixed_data.append(data_item.name) - if isinstance(data, tuple): - data = list(data) - if isinstance(data, list): - fixed_data = fixed_data + data - else: - fixed_data.append(data) - if len(fixed_data) > 0: - fixed_data = [tuple(fixed_data)] - # create data object - new_data = self.block.data_factory( - simulation_data, - None, - block_header_structure[0], - True, - var_path, - dimensions, - fixed_data, - ) - - self.add_data_item(new_data, data) - - def add_data_item(self, new_data, data): - """Adds data to the block.""" - self.data_items.append(new_data) - while isinstance(data, list): - if len(data) > 0: - data = data[0] - else: - data = None - if not isinstance(data, tuple): - data = (data,) - self.blk_trailing_comment_path += data - self.blk_post_comment_path += data - - def is_same_header(self, block_header): - """Checks if `block_header` is the same header as this header.""" - if len(self.variable_strings) > 0: - if len(self.variable_strings) != len( - block_header.variable_strings - ): - return False - else: - for sitem, oitem in zip( - self.variable_strings, block_header.variable_strings - ): - if sitem != oitem: - return False - return True - elif ( - len(self.data_items) > 0 and len(block_header.variable_strings) > 0 - ): - typ_obj = ( - self.data_items[0].structure.data_item_structures[0].type_obj - ) - if typ_obj == int or typ_obj == float: - return bool( - self.variable_strings[0] - == block_header.variable_strings[0] - ) - else: - return True - elif len(self.data_items) == len(block_header.variable_strings): - return True - return False - - def get_comment(self): - """Get block header comment""" - if self.simulation_data is None: - return self.comment - else: - return self.simulation_data.mfdata[self.comment_path] - - 
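# An illustration of this internal API: the write_header()/write_footer()
# pair defined just below produces the BEGIN/END markers of MF6 input files.
# Here "header" stands for an existing MFBlockHeader instance.
import io

buf = io.StringIO()
header.write_header(buf)  # e.g. "BEGIN period  1\n"
header.write_footer(buf)  # e.g. "END period  1\n"
print(buf.getvalue())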
def connect_to_dict(self, simulation_data, path, comment=None): - """Add comment to the simulation dictionary""" - self.simulation_data = simulation_data - self.path = path - self.comment_path = path + ("blk_hdr_comment",) - if comment is None: - simulation_data.mfdata[self.comment_path] = self.comment - else: - simulation_data.mfdata[self.comment_path] = comment - self.comment = None - - def write_header(self, fd): - """Writes block header to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block header to. - - """ - fd.write(f"BEGIN {self.name}") - if len(self.data_items) > 0: - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - one_based = ( - self.data_items[0].structure.type == DatumType.integer - ) - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - if len(self.data_items) > 1: - for data_item in self.data_items[1:]: - entry = data_item.get_file_entry(values_only=True) - fd.write(str(entry).rstrip()) - if self.get_comment().text: - fd.write(" ") - self.get_comment().write(fd) - fd.write("\n") - - def write_footer(self, fd): - """Writes block footer to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block footer to. - - """ - fd.write(f"END {self.name}") - if len(self.data_items) > 0: - one_based = self.data_items[0].structure.type == DatumType.integer - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - fd.write("\n") - - def get_transient_key(self, data_path=None): - """Get transient key associated with this block header.""" - transient_key = None - for index in range(0, len(self.data_items)): - if self.data_items[index].structure.type != DatumType.keyword: - if data_path == self.data_items[index].path: - # avoid infinite recursion - return True - transient_key = self.data_items[index].get_data() - if isinstance(transient_key, np.recarray): - item_struct = self.data_items[index].structure - key_index = item_struct.first_non_keyword_index() - if not ( - key_index is not None - and len(transient_key[0]) > key_index - ): - if key_index is None: - raise FlopyException( - "Block header index could " - "not be determined." - ) - else: - raise FlopyException( - 'Block header index "{}" ' - 'must be less than "{}"' - ".".format(key_index, len(transient_key[0])) - ) - transient_key = transient_key[0][key_index] - break - return transient_key - - -class MFBlock: - """ - Represents a block in a MF6 input file. This class is used internally - by FloPy and use by users of the FloPy library is not recommended. 
- - Parameters - ---------- - simulation_data : MFSimulationData - Data specific to this simulation - dimensions : MFDimensions - Describes model dimensions including model grid and simulation time - structure : MFVariableStructure - Structure describing block - path : tuple - Unique path to block - - Attributes - ---------- - block_headers : MFBlockHeader - Block header text (BEGIN/END), header variables, comments in the - header - structure : MFBlockStructure - Structure describing block - path : tuple - Unique path to block - datasets : OrderDict - Dictionary of dataset objects with keys that are the name of the - dataset - datasets_keyword : dict - Dictionary of dataset objects with keys that are key words to identify - start of dataset - enabled : bool - If block is being used in the simulation - - """ - - def __init__( - self, - simulation_data, - dimensions, - structure, - path, - model_or_sim, - container_package, - ): - self._simulation_data = simulation_data - self._dimensions = dimensions - self._model_or_sim = model_or_sim - self._container_package = container_package - self.block_headers = [ - MFBlockHeader( - structure.name, - [], - MFComment("", path, simulation_data, 0), - simulation_data, - path, - self, - ) - ] - self.structure = structure - self.path = path - self.datasets = {} - self.datasets_keyword = {} - # initially disable if optional - self.enabled = structure.number_non_optional_data() > 0 - self.loaded = False - self.external_file_name = None - self._structure_init() - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - data_str = "" - for dataset in self.datasets.values(): - if formal: - ds_repr = repr(dataset) - if len(ds_repr.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!r}\n" - ) - else: - ds_str = str(dataset) - if len(ds_str.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!s}\n" - ) - return data_str - - # return an MFScalar, MFList, or MFArray - def data_factory( - self, - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - data=None, - package=None, - ): - """Creates the appropriate data child object derived from MFData.""" - data_type = structure.get_datatype() - # examine the data structure and determine the data type - if ( - data_type == mfstructure.DataType.scalar_keyword - or data_type == mfstructure.DataType.scalar - ): - return mfdatascalar.MFScalar( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - ) - elif ( - data_type == mfstructure.DataType.scalar_keyword_transient - or data_type == mfstructure.DataType.scalar_transient - ): - trans_scalar = mfdatascalar.MFScalarTransient( - sim_data, model_or_sim, structure, enable, path, dimensions - ) - if data is not None: - trans_scalar.set_data(data, key=0) - return trans_scalar - elif data_type == mfstructure.DataType.array: - return mfdataarray.MFArray( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - self, - ) - elif data_type == mfstructure.DataType.array_transient: - trans_array = mfdataarray.MFTransientArray( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - self, - ) - if data is not None: - trans_array.set_data(data, key=0) - return trans_array - elif data_type == mfstructure.DataType.list: - if ( - structure.basic_item - and self._container_package.package_type.lower() != "nam" - and self._simulation_data.use_pandas - ): - 
return mfdataplist.MFPandasList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - else: - return mfdatalist.MFList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - elif data_type == mfstructure.DataType.list_transient: - if structure.basic_item and self._simulation_data.use_pandas: - trans_list = mfdataplist.MFPandasTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - else: - trans_list = mfdatalist.MFTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - trans_list.set_data(data, key=0, autofill=True) - return trans_list - elif data_type == mfstructure.DataType.list_multiple: - mult_list = mfdatalist.MFMultipleList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - mult_list.set_data(data, key=0, autofill=True) - return mult_list - - def _structure_init(self): - # load datasets keywords into dictionary - for dataset_struct in self.structure.data_structures.values(): - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - # load block header data items into dictionary - for dataset in self.structure.block_header_structure: - self._new_dataset(dataset.name, dataset, True, None) - - def set_model_relative_path(self, model_ws): - """Sets `model_ws` as the model path relative to the simulation's - path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. - """ - # update datasets - for key, dataset in self.datasets.items(): - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_name = os.path.split(file_line[0])[1] - file_line[0] = os.path.join(model_ws, old_file_name) - # update block headers - for block_header in self.block_headers: - for dataset in block_header.data_items: - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_path, old_file_name = os.path.split( - file_line[1] - ) - new_file_path = os.path.join( - model_ws, old_file_name - ) - # update transient keys of datasets within the - # block - for key, idataset in self.datasets.items(): - if isinstance(idataset, mfdata.MFTransient): - idataset.update_transient_key( - file_line[1], new_file_path - ) - file_line[1] = os.path.join( - model_ws, old_file_name - ) - - def add_dataset(self, dataset_struct, data, var_path): - """Add data to this block.""" - try: - self.datasets[var_path[-1]] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - var_path, - self._dimensions, - data, - 
self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - - self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] - dtype = dataset_struct.get_datatype() - if ( - dtype == mfstructure.DataType.list_transient - or dtype == mfstructure.DataType.list_multiple - or dtype == mfstructure.DataType.array_transient - ): - # build repeating block header(s) - if isinstance(data, dict): - # Add block headers for each dictionary key - for index in data: - if isinstance(index, tuple): - header_list = list(index) - else: - header_list = [index] - self._build_repeating_header(header_list) - elif isinstance(data, list): - # Add a single block header of value 0 - self._build_repeating_header([0]) - elif ( - dtype != mfstructure.DataType.list_multiple - and data is not None - ): - self._build_repeating_header([[0]]) - - return self.datasets[var_path[-1]] - - def _build_repeating_header(self, header_data): - if self.header_exists(header_data[0]): - return - if ( - len(self.block_headers[-1].data_items) == 1 - and self.block_headers[-1].data_items[0].get_data() is not None - ): - block_header_path = self.path + (len(self.block_headers) + 1,) - block_header = MFBlockHeader( - self.structure.name, - [], - MFComment("", self.path, self._simulation_data, 0), - self._simulation_data, - block_header_path, - self, - ) - self.block_headers.append(block_header) - else: - block_header_path = self.path + (len(self.block_headers),) - - struct = self.structure - last_header = self.block_headers[-1] - try: - last_header.build_header_variables( - self._simulation_data, - struct.block_header_structure, - block_header_path, - header_data, - self._dimensions, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while building" - " block header variables for block " - '"{}"'.format(last_header.name), - ) - - def _new_dataset( - self, key, dataset_struct, block_header=False, initial_val=None - ): - dataset_path = self.path + (key,) - if block_header: - if ( - dataset_struct.type == DatumType.integer - and initial_val is not None - and len(initial_val) >= 1 - and dataset_struct.get_record_size()[0] == 1 - ): - # stress periods are stored 0 based - initial_val = int(initial_val[0]) - 1 - if isinstance(initial_val, list): - initial_val_path = tuple(initial_val) - initial_val = [tuple(initial_val)] - else: - initial_val_path = initial_val - try: - new_data = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - self.block_headers[-1].add_data_item(new_data, initial_val_path) - - else: - try: - self.datasets[key] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - 
) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - - def is_empty(self): - """Returns true if this block is empty.""" - for key, dataset in self.datasets.items(): - try: - has_data = dataset.has_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while verifying" - ' data of dataset "{}" in block ' - '"{}"'.format(dataset.structure.name, self.structure.name), - ) - - if has_data is not None and has_data: - return False - return True - - def load(self, block_header, fd, strict=True): - """Loads block from file object. The file object must be advanced to - the beginning of the block before calling. - - Parameters - ---------- - block_header : MFBlockHeader - Block header for the block being loaded. - fd : file - File descriptor of file being loaded - strict : bool - Enforce strict MODFLOW 6 file format. - """ - # verify number of header variables - if ( - len(block_header.variable_strings) - < self.structure.number_non_optional_block_header_data() - ): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" does not ' - "contain the correct number of " - "variables {}".format(block_header.name, self.path) - ) - print(warning_str) - return - - if self.loaded: - # verify header has not already been loaded - for bh_current in self.block_headers: - if bh_current.is_same_header(block_header): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" is ' - "not a unique block header " - "{}".format(block_header.name, self.path) - ) - print(warning_str) - return - - # init - self.enabled = True - if not self.loaded: - self.block_headers = [] - block_header.block = self - self.block_headers.append(block_header) - - # process any header variable - if len(self.structure.block_header_structure) > 0: - dataset = self.structure.block_header_structure[0] - self._new_dataset( - dataset.name, - dataset, - True, - self.block_headers[-1].variable_strings, - ) - - # handle special readasarrays case - if ( - self._container_package.structure.read_as_arrays - or ( - hasattr(self._container_package, "aux") - and self._container_package.aux.structure.layered - ) - ): - # auxiliary variables may appear with aux variable name as keyword - aux_vars = self._container_package.auxiliary.get_data() - if aux_vars is not None: - for var_name in list(aux_vars[0])[1:]: - self.datasets_keyword[(var_name,)] = ( - self._container_package.aux.structure - ) - - comments = [] - - # capture any initial comments - initial_comment = MFComment("", "", 0) - fd_block = fd - line = fd_block.readline() - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.split_data_line(line) - post_data_comments = MFComment("", "", self._simulation_data, 0) - while MFComment.is_comment(line, True): - initial_comment.add_text(line) - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - - # if block not empty - external_file_info
= None - if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"): - if arr_line[0].lower() == "open/close": - # open block contents from external file - fd_block.readline() - root_path = self._simulation_data.mfpath.get_sim_path() - try: - file_name = os.path.split(arr_line[1])[-1] - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f' opening external file "{file_name}"...' - ) - external_file_info = arr_line - except: - type_, value_, traceback_ = sys.exc_info() - message = f'Error reading external file specified in line "{line}"' - raise MFDataException( - self._container_package.model_name, - self._container_package._get_pname(), - self.path, - "reading external file", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ) - if len(self.structure.data_structures) <= 1: - # load a single data set - dataset = self.datasets[next(iter(self.datasets))] - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading data {dataset.structure.name}..." - ) - next_line = dataset.load( - line, - fd_block, - self.block_headers[-1], - initial_comment, - external_file_info, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format( - dataset.structure.name, - self.structure.name, - fd_block.name, - ), - ) - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - - if next_line[1] is not None: - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - else: - arr_line = "" - # capture any trailing comments - dataset.post_data_comments = post_data_comments - while arr_line and ( - len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" - ): - next_line[1] = fd_block.readline().strip() - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - if arr_line and ( - len(next_line[1]) <= 2 - or arr_line[0][:3].upper() != "END" - ): - post_data_comments.add_text(" ".join(arr_line)) - else: - # look for keyword and store line as data or comment - try: - key, results = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - except MFInvalidTransientBlockHeaderException as e: - warning_str = f"WARNING: {e}" - print(warning_str) - self.block_headers.pop() - return - - self._save_comments(arr_line, line, key, comments) - if results[1] is None or results[1][:3].upper() != "END": - # block consists of unordered datasets - # load the data sets out of order based on - # initial constants - line = " " - while line != "": - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line: - # determine if at end of block - if ( - len(arr_line[0]) > 2 - and arr_line[0][:3].upper() == "END" - ): - break - # look for keyword and store line as data - # or comment - key, result = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - self._save_comments(arr_line, line, key, comments) - if ( - result[1] is not None - and result[1][:3].upper() == "END" - ): - break - else: - # block empty, store empty array in block variables - empty_arr = [] - for ds in self.datasets.values(): - if isinstance(ds, mfdata.MFTransient): - transient_key = block_header.get_transient_key() - ds.set_data(empty_arr, key=transient_key) - self.loaded = True - self.is_valid() - - def _find_data_by_keyword(self, line, fd, initial_comment): - first_key = None - nothing_found = False - next_line = [True, line] - while next_line[0] and not nothing_found: - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - key = datautil.find_keyword(arr_line, self.datasets_keyword) - if key is not None: - ds_name = self.datasets_keyword[key].name - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f"        loading data {ds_name}...") - next_line = self.datasets[ds_name].load( - next_line[1], - fd, - self.block_headers[-1], - initial_comment, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format(ds_name, self.structure.name, fd.name), - ) - - # see if first item's name indicates a reference to - # another package - package_info_list = self._get_package_info( - self.datasets[ds_name]
) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[1]}..." - ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - if first_key is None: - first_key = key - nothing_found = False - elif ( - arr_line[0].lower() == "readasarrays" - and self.path[-1].lower() == "options" - and self._container_package.structure.read_as_arrays is False - ): - error_msg = ( - "ERROR: Attempting to read a ReadAsArrays " - "package as a non-ReadAsArrays " - "package {}".format(self.path) - ) - raise ReadAsArraysException(error_msg) - else: - nothing_found = True - - if first_key is None: - # look for recarrays. if there is a lone recarray in this block, - # use it by default - recarrays = self.structure.get_all_recarrays() - if len(recarrays) != 1: - return key, [None, None] - dataset = self.datasets[recarrays[0].name] - ds_result = dataset.load( - line, fd, self.block_headers[-1], initial_comment - ) - - # see if first item's name indicates a reference to another - # package - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - None, - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - - return recarrays[0].keyword, ds_result - else: - return first_key, next_line - - def _get_package_info(self, dataset): - if not dataset.structure.file_data: - return None - for index in range(0, len(dataset.structure.data_item_structures)): - data_item = dataset.structure.data_item_structures[index] - if ( - data_item.type == DatumType.keyword - or data_item.type == DatumType.string - ): - item_name = data_item.name - package_type = item_name[:-1] - model_type = self._model_or_sim.structure.model_type - # not all packages have the same naming convention - # try different naming conventions to find the appropriate - # package - package_types = [ - package_type, - f"{self._container_package.package_type}" - f"{package_type}", - ] - package_type_found = None - for ptype in package_types: - if ( - PackageContainer.package_factory(ptype, model_type) - is not None - ): - package_type_found = ptype - break - if package_type_found is not None: - try: - data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'getting data from "{}" ' - 'in block "{}".'.format( - dataset.structure.name, self.structure.name - ), - ) - package_info_list = [] - if isinstance(data, np.recarray): - for row in data: - self._add_to_info_list( - package_info_list, - row[index], - package_type_found, - ) - else: - self._add_to_info_list( - package_info_list, data, package_type_found - ) - - return package_info_list - return None - - def _add_to_info_list( - self, package_info_list, file_location, package_type_found - ): - file_path, file_name = os.path.split(file_location) - dict_package_name = f"{package_type_found}_{self.path[-2]}" - package_info_list.append( - ( - package_type_found, - file_name, - file_path, - dict_package_name, - ) - ) - - def _save_comments(self, arr_line, line, key, comments): - # FIX: Save these comments somewhere in the data set - if key not in self.datasets_keyword: - if MFComment.is_comment(key, True): - if comments: - comments.append("\n") - comments.append(arr_line) - - def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes block to a file object. - - Parameters - ---------- - fd : file object - File object to write to. 
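-        ext_file_action : ExtFileAction
-            How to handle pathing of external data files.
-
-        Notes
-        -----
-        An empty block is skipped unless it is one of the "exchanges",
-        "options", "sources", or "stressperioddata" blocks; a repeating
-        block is written once per block header.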
- - """ - # never write an empty block - is_empty = self.is_empty() - if ( - is_empty - and self.structure.name.lower() != "exchanges" - and self.structure.name.lower() != "options" - and self.structure.name.lower() != "sources" - and self.structure.name.lower() != "stressperioddata" - ): - return - if self.structure.repeating(): - repeating_datasets = self._find_repeating_datasets() - for repeating_dataset in repeating_datasets: - # resolve any missing block headers - self._add_missing_block_headers(repeating_dataset) - for block_header in sorted(self.block_headers): - # write block - self._write_block(fd, block_header, ext_file_action) - else: - self._write_block(fd, self.block_headers[0], ext_file_action) - - def _add_missing_block_headers(self, repeating_dataset): - key_data_list = repeating_dataset.get_active_key_list() - # assemble a dictionary of data keys and empty keys - key_dict = {} - for key in key_data_list: - key_dict[key[0]] = True - for key, value in repeating_dataset.empty_keys.items(): - if value: - key_dict[key] = True - for key in key_dict.keys(): - has_data = repeating_dataset.has_data(key) - empty_key = ( - key in repeating_dataset.empty_keys - and repeating_dataset.empty_keys[key] - ) - if not self.header_exists(key) and (has_data or empty_key): - self._build_repeating_header([key]) - - def header_exists(self, key, data_path=None): - if not isinstance(key, list): - if key is None: - return - comp_key_list = [key] - else: - comp_key_list = key - for block_header in self.block_headers: - transient_key = block_header.get_transient_key(data_path) - if transient_key is True: - return - for comp_key in comp_key_list: - if transient_key is not None and transient_key == comp_key: - return True - return False - - def set_all_data_external( - self, - base_name, - check_data=True, - external_data_folder=None, - binary=False, - ): - """Sets the block's list and array data to be stored externally, - base_name is external file name's prefix, check_data determines - if data error checking is enabled during this process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - base_name : str - Base file name of external files where data will be written to. - check_data : bool - Whether to do data error checking. 
- external_data_folder - Folder where external data will be stored - binary: bool - Whether file will be stored as binary - - """ - - for key, dataset in self.datasets.items(): - lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( - dataset, mfdataplist.MFPandasList - ) - if ( - isinstance(dataset, mfdataarray.MFArray) - or (lst_data and dataset.structure.type == DatumType.recarray) - and dataset.enabled - ): - if not binary or ( - lst_data - and ( - dataset.data_dimensions.package_dim.boundnames() - or not dataset.structure.basic_item - ) - ): - ext = "txt" - binary = False - else: - ext = "bin" - file_path = f"{base_name}_{dataset.structure.name}.{ext}" - replace_existing_external = False - if external_data_folder is not None: - # get simulation root path - root_path = self._simulation_data.mfpath.get_sim_path() - # get model relative path, if it exists - if isinstance(self._model_or_sim, ModelInterface): - name = self._model_or_sim.name - rel_path = ( - self._simulation_data.mfpath.model_relative_path[ - name - ] - ) - if rel_path is not None: - root_path = os.path.join(root_path, rel_path) - full_path = os.path.join(root_path, external_data_folder) - if not os.path.exists(full_path): - # create new external data folder - os.makedirs(full_path) - file_path = os.path.join(external_data_folder, file_path) - replace_existing_external = True - dataset.store_as_external_file( - file_path, - replace_existing_external=replace_existing_external, - check_data=check_data, - binary=binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the block's list and array data to be stored internally, - check_data determines if data error checking is enabled during this - process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - check_data : bool - Whether to do data error checking. 
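-
-        Examples
-        --------
-        A minimal sketch; ``pkg`` stands in for a hypothetical,
-        already-loaded package, whose package-level method loops over
-        its blocks in essentially this way:
-
-        >>> for block in pkg.blocks.values():
-        ...     block.set_all_data_internal(check_data=True)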
- - """ - - for key, dataset in self.datasets.items(): - if ( - isinstance(dataset, mfdataarray.MFArray) - or ( - ( - isinstance(dataset, mfdatalist.MFList) - or isinstance(dataset, mfdataplist.MFPandasList) - ) - and dataset.structure.type == DatumType.recarray - ) - and dataset.enabled - ): - dataset.store_internal(check_data=check_data) - - def _find_repeating_datasets(self): - repeating_datasets = [] - for key, dataset in self.datasets.items(): - if dataset.repeating: - repeating_datasets.append(dataset) - return repeating_datasets - - def _prepare_external(self, fd, file_name, binary=False): - fd_main = fd - fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) - # resolve full file and folder path - fd_file_path = os.path.join(fd_path, file_name) - fd_folder_path = os.path.split(fd_file_path)[0] - if fd_folder_path != "": - if not os.path.exists(fd_folder_path): - # create new external data folder - os.makedirs(fd_folder_path) - return fd_main, fd_file_path - - def _write_block(self, fd, block_header, ext_file_action): - transient_key = None - basic_list = False - dataset_one = list(self.datasets.values())[0] - if isinstance( - dataset_one, - (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), - ): - basic_list = True - for dataset in self.datasets.values(): - assert isinstance( - dataset, - ( - mfdataplist.MFPandasList, - mfdataplist.MFPandasTransientList, - ), - ) - # write block header - block_header.write_header(fd) - if len(block_header.data_items) > 0: - transient_key = block_header.get_transient_key() - - # gather data sets to write - data_set_output = [] - data_found = False - for key, dataset in self.datasets.items(): - try: - if transient_key is None: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" writing data {dataset.structure.name}..." - ) - if basic_list: - ext_fname = dataset.external_file_name() - if ext_fname is not None: - binary = dataset.binary_ext_data() - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry(fd, fd_main=fd_main) - fd = fd_main - else: - dataset.write_file_entry(fd) - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - else: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - " writing data {} ({}).." 
".".format( - dataset.structure.name, transient_key - ) - ) - if basic_list: - ext_fname = dataset.external_file_name(transient_key) - if ext_fname is not None: - binary = dataset.binary_ext_data(transient_key) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - fd_main=fd_main, - ) - fd = fd_main - else: - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - ) - else: - if dataset.repeating: - output = dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action - ) - if output is not None: - data_set_output.append(output) - data_found = True - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message=( - "Error occurred while writing data " - f'"{dataset.structure.name}" in block ' - f'"{self.structure.name}" to file "{fd.name}"' - ), - ) - if not data_found: - return - if not basic_list: - # write block header - block_header.write_header(fd) - - if self.external_file_name is not None: - indent_string = self._simulation_data.indent_string - fd.write( - f"{indent_string}open/close " - f'"{self.external_file_name}"\n' - ) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, self.external_file_name - ) - # write data sets - for output in data_set_output: - fd.write(output) - - # write trailing comments - pth = block_header.blk_trailing_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - if self.external_file_name is not None and not basic_list: - # switch back writing to package file - fd.close() - fd = fd_main - - # write block footer - block_header.write_footer(fd) - - # write post block comments - pth = block_header.blk_post_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - # write extra line if comments are off - if not self._simulation_data.comments_on: - fd.write("\n") - - def is_allowed(self): - """Determine if block is valid based on the values of dependent - MODFLOW variables.""" - if self.structure.variable_dependant_path: - # fill in empty part of the path with the current path - if len(self.structure.variable_dependant_path) == 3: - dependant_var_path = ( - self.path[0], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 2: - dependant_var_path = ( - self.path[0], - self.path[1], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 1: - dependant_var_path = ( - self.path[0], - self.path[1], - self.path[2], - ) + self.structure.variable_dependant_path - else: - dependant_var_path = None - - # get dependency - dependant_var = None - mf_data = self._simulation_data.mfdata - if dependant_var_path in mf_data: - dependant_var = mf_data[dependant_var_path] - - # resolve dependency - if self.structure.variable_value_when_active[0] == "Exists": - exists = self.structure.variable_value_when_active[1] - if dependant_var and exists.lower() == "true": - return True - elif not dependant_var and exists.lower() == "false": - return True - else: - return False - elif not dependant_var: - return False - elif 
self.structure.variable_value_when_active[0] == ">": - min_val = self.structure.variable_value_when_active[1] - if dependant_var > float(min_val): - return True - else: - return False - elif self.structure.variable_value_when_active[0] == "<": - max_val = self.structure.variable_value_when_active[1] - if dependant_var < float(max_val): - return True - else: - return False - return True - - def is_valid(self): - """ - Returns true if the block is valid. - """ - # check data sets - for dataset in self.datasets.values(): - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid: - return False - # check variables - for block_header in self.block_headers: - for dataset in block_header.data_items: - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid(): - return False - - -class MFPackage(PackageInterface): - """ - Provides an interface for the user to specify data to build a package. - - Parameters - ---------- - parent : MFModel, MFSimulation, or MFPackage - The parent model, simulation, or package containing this package - package_type : str - String defining the package type - filename : str or PathLike - Name or path of file where this package is stored - quoted_filename : str - Filename with quotes around it when there is a space in the name - pname : str - Package name - loading_package : bool - Whether or not to add this package to the parent container's package - list during initialization - - Attributes - ---------- - blocks : dict - Dictionary of blocks contained in this package by block name - path : tuple - Data dictionary path to this package - structure : PackageStructure - Describes the blocks and data contain in this package - dimensions : PackageDimension - Resolves data dimensions for data within this package - - """ - - def __init__( - self, - parent, - package_type, - filename=None, - pname=None, - loading_package=False, - **kwargs, - ): - parent_file = kwargs.pop("parent_file", None) - if isinstance(parent, MFPackage): - self.model_or_sim = parent.model_or_sim - self.parent_file = parent - elif parent_file is not None: - self.model_or_sim = parent - self.parent_file = parent_file - else: - self.model_or_sim = parent - self.parent_file = None - _internal_package = kwargs.pop("_internal_package", False) - if _internal_package: - self.internal_package = True - else: - self.internal_package = False - self._data_list = [] - self._package_type = package_type - if self.model_or_sim.type == "Model" and package_type.lower() != "nam": - self.model_name = self.model_or_sim.name - else: - self.model_name = None - - # a package must have a dfn_file_name - if not hasattr(self, "dfn_file_name"): - self.dfn_file_name = "" - - if ( - self.model_or_sim.type != "Model" - and self.model_or_sim.type != "Simulation" - ): - message = ( - "Invalid model_or_sim parameter. Expecting either a " - 'model or a simulation. 
Instead type "{}" was ' - "given.".format(type(self.model_or_sim)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self._package_container = PackageContainer( - self.model_or_sim.simulation_data - ) - self.simulation_data = self.model_or_sim.simulation_data - - self.blocks = {} - self.container_type = [] - self.loading_package = loading_package - if pname is not None: - if not isinstance(pname, str): - message = ( - "Invalid pname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(pname)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self.package_name = pname.lower() - else: - self.package_name = None - - if filename is None: - if self.model_or_sim.type == "Simulation": - # filename uses simulation base name - base_name = os.path.basename( - os.path.normpath(self.model_or_sim.name) - ) - self._filename = f"{base_name}.{package_type}" - else: - # filename uses model base name - self._filename = f"{self.model_or_sim.name}.{package_type}" - else: - if not isinstance(filename, (str, os.PathLike)): - message = ( - "Invalid fname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(filename)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - self._filename = datautil.clean_filename( - str(filename).replace("\\", "/") - ) - self.path, self.structure = self.model_or_sim.register_package( - self, not loading_package, pname is None, filename is None - ) - self.dimensions = self.create_package_dimensions() - - if self.path is None: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package type {} failed to register property." - " {}".format(self._package_type, self.path) - ) - if self.parent_file is not None: - self.container_type.append(PackageContainerType.package) - # init variables that may be used later - self.post_block_comments = None - self.last_error = None - self.bc_color = "black" - self.__inattr = False - self._child_package_groups = {} - child_builder_call = kwargs.pop("child_builder_call", None) - if ( - self.parent_file is not None - and child_builder_call is None - and package_type in self.parent_file._child_package_groups - ): - # initialize as part of the parent's child package group - chld_pkg_grp = self.parent_file._child_package_groups[package_type] - chld_pkg_grp.init_package(self, self._filename, False) - - # remove any remaining valid kwargs - key_list = list(kwargs.keys()) - for key in key_list: - if "filerecord" in key and hasattr(self, f"{key}"): - kwargs.pop(f"{key}") - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = ( - f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
-            )
-            raise FlopyException(excpt_str)
-
-    def __init_subclass__(cls):
-        """Register package type"""
-        super().__init_subclass__()
-        PackageContainer.packages_by_abbr[cls.package_abbr] = cls
-
-    def __setattr__(self, name, value):
-        if hasattr(self, name) and getattr(self, name) is not None:
-            attribute = object.__getattribute__(self, name)
-            if attribute is not None and isinstance(attribute, mfdata.MFData):
-                try:
-                    if isinstance(attribute, mfdatalist.MFList):
-                        attribute.set_data(value, autofill=True)
-                    else:
-                        attribute.set_data(value)
-                except MFDataException as mfde:
-                    raise MFDataException(
-                        mfdata_except=mfde,
-                        model=self.model_name,
-                        package=self._get_pname(),
-                    )
-                return
-
-        if all(
-            hasattr(self, attr) for attr in ["model_or_sim", "_package_type"]
-        ):
-            if hasattr(self.model_or_sim, "_mg_resync"):
-                if not self.model_or_sim._mg_resync:
-                    self.model_or_sim._mg_resync = self._mg_resync
-
-        super().__setattr__(name, value)
-
-    def __repr__(self):
-        return self._get_data_str(True)
-
-    def __str__(self):
-        return self._get_data_str(False)
-
-    @property
-    def filename(self):
-        """Package's file name."""
-        return self._filename
-
-    @property
-    def quoted_filename(self):
-        """Package's file name with quotes if there is a space."""
-        if " " in self._filename:
-            return f'"{self._filename}"'
-        return self._filename
-
-    @filename.setter
-    def filename(self, fname):
-        """Package's file name."""
-        if (
-            isinstance(self.parent_file, MFPackage)
-            and self.package_type in self.parent_file._child_package_groups
-        ):
-            fname = datautil.clean_filename(fname)
-            try:
-                child_pkg_group = self.parent_file._child_package_groups[
-                    self.structure.file_type
-                ]
-                child_pkg_group._update_filename(self._filename, fname)
-            except Exception:
-                print(
-                    "WARNING: Unable to update file name for parent "
-                    f"package of {self.package_name}."
-                )
-        if self.model_or_sim is not None and fname is not None:
-            if self._package_type != "nam":
-                self.model_or_sim.update_package_filename(self, fname)
-            self._filename = fname
-
-    @property
-    def package_type(self):
-        """String describing type of package"""
-        return self._package_type
-
-    @property
-    def name(self):
-        """Name of package"""
-        return [self.package_name]
-
-    @name.setter
-    def name(self, name):
-        """Name of package"""
-        self.package_name = name
-
-    @property
-    def parent(self):
-        """Parent package"""
-        return self.model_or_sim
-
-    @parent.setter
-    def parent(self, parent):
-        """Parent package"""
-        assert False, "Do not use this setter to set the parent"
-
-    @property
-    def plottable(self):
-        """If package is plottable"""
-        if self.model_or_sim.type == "Simulation":
-            return False
-        else:
-            return True
-
-    @property
-    def output(self):
-        """
-        Method to get output associated with a specific package
-
-        Returns
-        -------
-        MF6Output object
-        """
-        return MF6Output(self)
-
-    @property
-    def data_list(self):
-        """List of data in this package."""
-        # return [data_object, data_object, ...]
-        return self._data_list
-
-    @property
-    def package_key_dict(self):
-        """
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_type_dict
-
-    @property
-    def package_names(self):
-        """Returns a list of package names.
-
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_names
-
-    @property
-    def package_dict(self):
-        """
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_dict
-
-    @property
-    def package_type_dict(self):
-        """
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_type_dict
-
-    @property
-    def package_name_dict(self):
-        """
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_name_dict
-
-    @property
-    def package_filename_dict(self):
-        """
-        .. deprecated:: 3.9
-            This method is for internal use only and will be deprecated.
-        """
-        warnings.warn(
-            "This method is for internal use only and will be deprecated.",
-            category=DeprecationWarning,
-        )
-        return self._package_container.package_filename_dict
-
-    def netcdf_attrs(self, mesh=None):
-        attrs = {}
-
-        def attr_d(tagname, iaux=None, layer=None):
-            tag = tagname
-            name = f"{self.package_name}"
-            if iaux:
-                auxvar = self.dimensions.get_aux_variables()[0]
-                tag = f"{tag}/{iaux}"
-                name = f"{name}_{auxvar[iaux]}"
-            else:
-                name = f"{name}_{tagname}"
-            if layer:
-                tag = f"{tag}/layer{layer}"
-                name = f"{name}_l{layer}"
-
-            a = {}
-            a["varname"] = name
-            a["attrs"] = {}
-            a["attrs"]["modflow_input"] = (
-                f"{self.model_name}"
-                f"/{self.package_name}"
-                f"/{tagname}"
-            ).upper()
-            if iaux:
-                a["attrs"]["modflow_iaux"] = iaux
-            if layer:
-                a["attrs"]["layer"] = layer
-            return tag, a
-
-        for key, block in self.blocks.items():
-            if key != "griddata" and key != "period":
-                continue
-            for dataset in block.datasets.values():
-                if isinstance(dataset, mfdataarray.MFArray):
-                    for index, data_item in enumerate(
-                        dataset.structure.data_item_structures
-                    ):
-                        if not (
-                            dataset.structure.netcdf and dataset.has_data()
-                        ):
-                            continue
-                        if dataset.structure.layered and mesh == "LAYERED":
-                            if (
-                                data_item.name == "aux"
-                                or data_item.name == "auxvar"
-                            ):
-                                for n, auxname in enumerate(
-                                    self.dimensions.get_aux_variables()[0]
-                                ):
-                                    if auxname == "auxiliary" and n == 0:
-                                        continue
-                                    for lay in range(
-                                        self.model_or_sim.modelgrid.nlay
-                                    ):
-                                        tag, a = attr_d(
-                                            data_item.name, n, lay + 1
-                                        )
-                                        attrs[tag] = a
-                            else:
-                                for lay in range(
-                                    self.model_or_sim.modelgrid.nlay
-                                ):
-                                    tag, a = attr_d(
-                                        data_item.name, layer=lay + 1
-                                    )
-                                    attrs[tag] = a
-                        else:
-                            if (
-                                data_item.name == "aux"
-                                or data_item.name == "auxvar"
-                            ):
-                                for n, auxname in enumerate(
-                                    self.dimensions.get_aux_variables()[0]
-                                ):
-                                    if auxname == "auxiliary" and n == 0:
-                                        continue
-                                    tag, a = attr_d(data_item.name, iaux=n)
-                                    attrs[tag] = a
-                            else:
-                                tag, a = attr_d(data_item.name)
-                                attrs[tag] = a
-        return attrs
-
-    def get_package(self, name=None, type_only=False, name_only=False):
-        """
-        Finds a package by package name, package key, package type, or
-        partial package name. Returns either a single package, a list of
-        packages, or None.
-
-        Parameters
-        ----------
-        name : str
-            Name or type of the package, 'my-riv-1', 'RIV', 'LPF', etc.
- type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def add_package(self, package): - pkg_type = package.package_type.lower() - if pkg_type in self._package_container.package_type_dict: - for existing_pkg in self._package_container.package_type_dict[ - pkg_type - ]: - if existing_pkg is package: - # do not add the same package twice - return - self._package_container.add_package(package) - - def _get_aux_data(self, aux_names): - if hasattr(self, "stress_period_data"): - spd = self.stress_period_data.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "packagedata"): - pd = self.packagedata.get_data() - if aux_names[0][1] in pd.dtype.names: - return pd - if hasattr(self, "perioddata"): - spd = self.perioddata.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "aux"): - return self.aux.get_data() - return None - - def _boundnames_active(self): - if hasattr(self, "boundnames"): - if self.boundnames.get_data(): - return True - return False - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Data check, returns True on success. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - """ - - if checktype is None: - checktype = mf6check - # do general checks - chk = super().check(f, verbose, level, checktype) - - # do mf6 specific checks - if hasattr(self, "auxiliary"): - # auxiliary variable check - # check if auxiliary variables are defined - aux_names = self.auxiliary.get_data() - if aux_names is not None and len(aux_names[0]) > 1: - num_aux_names = len(aux_names[0]) - 1 - # check for stress period data - aux_data = self._get_aux_data(aux_names) - if aux_data is not None and len(aux_data) > 0: - # make sure the check object exists - if chk is None: - chk = self._get_check(f, verbose, level, checktype) - if isinstance(aux_data, dict): - aux_datasets = list(aux_data.values()) - else: - aux_datasets = [aux_data] - dataset_type = "unknown" - for dataset in aux_datasets: - if isinstance(dataset, np.recarray): - dataset_type = "recarray" - break - elif isinstance(dataset, np.ndarray): - dataset_type = "ndarray" - break - # if aux data is in a list - if dataset_type == "recarray": - # check for time series data - time_series_name_dict = {} - if hasattr(self, "ts") and hasattr( - self.ts, "time_series_namerecord" - ): - # build dictionary of time series data variables - ts_nr = self.ts.time_series_namerecord.get_data() - if ts_nr is not None: - for item in ts_nr: - if len(item) > 0 and item[0] is not None: - time_series_name_dict[item[0]] = True - # auxiliary variables are last unless boundnames - # defined, then second to last - if self._boundnames_active(): - offset = 1 - else: - offset = 0 - - # loop through stress period datasets with aux data - for data in aux_datasets: - if isinstance(data, np.recarray): - for row in data: - row_size = len(row) - aux_start_loc = ( - row_size - num_aux_names - offset - 1 - ) - # loop through auxiliary variables - for idx, var in enumerate( - list(aux_names[0])[1:] - ): - # get index of current aux variable - data_index = aux_start_loc 
+ idx - # verify auxiliary value is either - # numeric or time series variable - if ( - not datautil.DatumUtil.is_float( - row[data_index] - ) - and row[data_index] - not in time_series_name_dict - ): - desc = ( - f"Invalid non-numeric " - f"value " - f"'{row[data_index]}' " - f"in auxiliary data." - ) - chk._add_to_summary( - "Error", - desc=desc, - package=self.package_name, - ) - # else if stress period data is arrays - elif dataset_type == "ndarray": - # loop through auxiliary stress period datasets - for data in aux_datasets: - # verify auxiliary value is either numeric or time - # array series variable - if isinstance(data, np.ndarray): - val = np.isnan(np.sum(data)) - if val: - desc = ( - "One or more nan values were " - "found in auxiliary data." - ) - chk._add_to_summary( - "Warning", - desc=desc, - package=self.package_name, - ) - return chk - - def _get_nan_exclusion_list(self): - excl_list = [] - if hasattr(self, "stress_period_data"): - spd_struct = self.stress_period_data.structure - for item_struct in spd_struct.data_item_structures: - if item_struct.optional or item_struct.keystring_dict: - excl_list.append(item_struct.name) - return excl_list - - def _get_data_str(self, formal, show_data=True): - data_str = ( - "package_name = {}\nfilename = {}\npackage_type = {}" - "\nmodel_or_simulation_package = {}" - "\n{}_name = {}" - "\n".format( - self._get_pname(), - self._filename, - self.package_type, - self.model_or_sim.type.lower(), - self.model_or_sim.type.lower(), - self.model_or_sim.name, - ) - ) - if self.parent_file is not None and formal: - data_str = ( - f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" - ) - else: - data_str = f"{data_str}\n" - if show_data: - for block in self.blocks.values(): - if formal: - bl_repr = repr(block) - if len(bl_repr.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, repr(block) - ) - ) - else: - bl_str = str(block) - if len(bl_str.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, str(block) - ) - ) - return data_str - - def _get_pname(self): - if self.package_name is not None: - return str(self.package_name) - else: - return str(self._filename) - - def _get_block_header_info(self, line, path): - # init - header_variable_strs = [] - arr_clean_line = line.strip().split() - header_comment = MFComment( - "", path + (arr_clean_line[1],), self.simulation_data, 0 - ) - # break header into components - if len(arr_clean_line) < 2: - message = ( - "Block header does not contain a name. 
Name " - 'expected in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "parsing block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - elif len(arr_clean_line) == 2: - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - else: - # process text after block name - comment = False - for entry in arr_clean_line[2:]: - # if start of comment - if MFComment.is_comment(entry.strip()[0]): - comment = True - if comment: - header_comment.text = " ".join( - [header_comment.text, entry] - ) - else: - header_variable_strs.append(entry) - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - - def _update_size_defs(self): - # build temporary data lookup by name - data_lookup = {} - for block in self.blocks.values(): - for dataset in block.datasets.values(): - data_lookup[dataset.structure.name] = dataset - - # loop through all data - for block in self.blocks.values(): - for dataset in block.datasets.values(): - # if data shape is 1-D - if ( - dataset.structure.shape - and len(dataset.structure.shape) == 1 - ): - # if shape name is data in this package - if dataset.structure.shape[0] in data_lookup: - size_def = data_lookup[dataset.structure.shape[0]] - size_def_name = size_def.structure.name - - if isinstance(dataset, mfdata.MFTransient): - # for transient data always use the maximum size - new_size = -1 - for key in dataset.get_active_key_list(): - try: - data = dataset.get_data(key=key[0]) - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - data_len = len(data) - if data_len > new_size: - new_size = data_len - else: - # for all other data set max to size - new_size = -1 - try: - data = dataset.get_data() - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - new_size = len(dataset.get_data()) - - if size_def.get_data() is None: - current_size = -1 - else: - current_size = size_def.get_data() - - if new_size > current_size: - # store current size - size_def.set_data(new_size) - - # informational message to the user - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "INFORMATION: {} in {} changed to {} " - "based on size of {}".format( - size_def_name, - size_def.structure.path[:-1], - new_size, - dataset.structure.name, - ) - ) - - def inspect_cells(self, cell_list, stress_period=None): - """ - Inspect model cells. Returns package data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data, only return data from this stress period. If - not specified or None, all stress period data will be returned. 
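-
-        Examples
-        --------
-        A minimal sketch; ``riv`` stands in for any hypothetical
-        boundary package on a structured grid, and cells are zero-based
-        ``(layer, row, column)`` tuples:
-
-        >>> results = riv.inspect_cells([(0, 2, 3), (0, 4, 6)])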
- - Returns - ------- - output : array - Array containing inspection results - - """ - data_found = [] - - # loop through blocks - local_index_names = [] - local_index_blocks = [] - local_index_values = [] - local_index_cellids = [] - # loop through blocks in package - for block in self.blocks.values(): - # loop through data in block - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - # handle list data - cellid_column = None - local_index_name = None - # loop through list data column definitions - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if index == 0 and data_item.type == DatumType.integer: - local_index_name = data_item.name - # look for cellid column in list data row - if isinstance(data_item, MFDataItemStructure) and ( - data_item.is_cellid or data_item.possible_cellid - ): - cellid_column = index - break - if cellid_column is not None: - data_output = DataSearchOutput(dataset.path) - local_index_vals = [] - local_index_cells = [] - # get data - if isinstance(dataset, mfdatalist.MFTransientList): - # data may be in multiple transient blocks, get - # data from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get data - main_data = {-1: dataset.get_data()} - - # loop through each dataset - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = value.dtype.names - # loop through list data rows - for line in value: - # loop through list of cells we are searching - # for - for cell in cell_list: - if isinstance( - line[cellid_column], tuple - ) and cellids_equal( - line[cellid_column], cell - ): - # save data found - data_output.data_entries.append(line) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append( - key - ) - if datautil.DatumUtil.is_int(line[0]): - # save index data for further - # processing. 
assuming index is - # always first entry - local_index_vals.append(line[0]) - local_index_cells.append(cell) - - if ( - local_index_name is not None - and len(local_index_vals) > 0 - ): - # capture index lookups for scanning related data - local_index_names.append(local_index_name) - local_index_blocks.append(block.path[-1]) - local_index_values.append(local_index_vals) - local_index_cellids.append(local_index_cells) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - elif isinstance(dataset, mfdataarray.MFArray): - # handle array data - data_shape = copy.deepcopy( - dataset.structure.data_item_structures[0].shape - ) - if dataset.path[-1] == "top": - # top is a special case where the two datasets - # need to be combined to get the correct layer top - model_grid = self.model_or_sim.modelgrid - main_data = {-1: model_grid.top_botm} - data_shape.append("nlay") - else: - if isinstance(dataset, mfdataarray.MFTransientArray): - # data may be in multiple blocks, get data from - # appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get a process data - main_data = {-1: dataset.get_data()} - if main_data is None: - continue - data_output = DataSearchOutput(dataset.path) - # loop through datasets - for key, array_data in main_data.items(): - if array_data is None: - continue - self.model_or_sim.match_array_cells( - cell_list, data_shape, array_data, key, data_output - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - - if len(local_index_names) > 0: - # look for data that shares the index value with data found - # for example a shared well or reach number - for block in self.blocks.values(): - # loop through data - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - data_item = dataset.structure.data_item_structures[0] - data_output = DataSearchOutput(dataset.path) - # loop through previous data found - for ( - local_index_name, - local_index_vals, - cell_ids, - local_block_name, - ) in zip( - local_index_names, - local_index_values, - local_index_cellids, - local_index_blocks, - ): - if local_block_name == block.path[-1]: - continue - if ( - isinstance(data_item, MFDataItemStructure) - and data_item.name == local_index_name - and data_item.type == DatumType.integer - ): - # matching data index type found, get data - if isinstance( - dataset, mfdatalist.MFTransientList - ): - # data may be in multiple blocks, get data - # from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block - main_data = {-1: dataset.get_data()} - # loop through the data - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = ( - value.dtype.names - ) - # loop through each row of data - for line in value: - # loop through the index values we are - # looking for - for index_val, cell_id in zip( - local_index_vals, cell_ids - ): - # try to match index values we are - # looking for to the data - if index_val == line[0]: - # save data found - data_output.data_entries.append( - line - ) - data_output.data_entry_ids.append( - index_val - ) - data_output.data_entry_cellids.append( - cell_id - ) - data_output.data_entry_stress_period.append( - key - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - return 
data_found - - def remove(self): - """Removes this package from the simulation/model it is currently a - part of. - """ - self.model_or_sim.remove_package(self) - - def build_child_packages_container(self, pkg_type, filerecord): - """Builds a container object for any child packages. This method is - only intended for FloPy internal use.""" - # get package class - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - # create child package object - child_pkgs_name = f"utl{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") - if child_pkgs_obj is None and self.model_or_sim.model_type is None: - # simulation level object, try just the package type in the name - child_pkgs_name = f"{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory( - child_pkgs_name, "" - ) - if child_pkgs_obj is None: - # see if the package is part of one of the supported model types - for model_type in MFStructure().sim_struct.model_types: - child_pkgs_name = f"{model_type}{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory( - child_pkgs_name, "" - ) - if child_pkgs_obj is not None: - break - child_pkgs = child_pkgs_obj( - self.model_or_sim, self, pkg_type, filerecord, None, package_obj - ) - setattr(self, pkg_type, child_pkgs) - self._child_package_groups[pkg_type] = child_pkgs - - def _get_dfn_name_dict(self): - dfn_name_dict = {} - item_num = 0 - for item in self.structure.dfn_list: - if len(item) > 1: - item_name = item[1].split() - if len(item_name) > 1 and item_name[0] == "name": - dfn_name_dict[item_name[1]] = item_num - item_num += 1 - return dfn_name_dict - - def build_child_package(self, pkg_type, data, parameter_name, filerecord): - """Builds a child package. 
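-        Data may be passed as a dict keyed on variable names; keys that
-        do not match an attribute of the child package are gathered and
-        assigned to the variable named by ``parameter_name``.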
This method is only intended for FloPy - internal use.""" - if not hasattr(self, pkg_type): - self.build_child_packages_container(pkg_type, filerecord) - if data is not None: - package_group = getattr(self, pkg_type) - # build child package file name - child_path = package_group.next_default_file_path() - # create new empty child package - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - package = package_obj( - self, filename=child_path, child_builder_call=True - ) - assert hasattr(package, parameter_name) - - if isinstance(data, dict): - # order data correctly - dfn_name_dict = package._get_dfn_name_dict() - ordered_data_items = [] - for key, value in data.items(): - if key in dfn_name_dict: - ordered_data_items.append( - [dfn_name_dict[key], key, value] - ) - else: - ordered_data_items.append([999999, key, value]) - ordered_data_items = sorted( - ordered_data_items, key=lambda x: x[0] - ) - - # evaluate and add data to package - unused_data = {} - for order, key, value in ordered_data_items: - # if key is an attribute of the child package - if isinstance(key, str) and hasattr(package, key): - # set child package attribute - child_data_attr = getattr(package, key) - if isinstance(child_data_attr, mfdatalist.MFList): - child_data_attr.set_data(value, autofill=True) - elif isinstance(child_data_attr, mfdata.MFData): - child_data_attr.set_data(value) - elif key == "fname" or key == "filename": - child_path = value - package._filename = value - else: - setattr(package, key, value) - else: - unused_data[key] = value - if unused_data: - setattr(package, parameter_name, unused_data) - else: - setattr(package, parameter_name, data) - - # append package to list - package_group.init_package(package, child_path) - return package - - def build_mfdata(self, var_name, data=None): - """Returns the appropriate data type object (mfdatalist, mfdataarray, - or mfdatascalar) given that object the appropriate structure (looked - up based on var_name) and any data supplied. This method is for - internal FloPy library use only. - - Parameters - ---------- - var_name : str - Variable name - - data : many supported types - Data contained in this object - - Returns - ------- - data object : MFData subclass - - """ - if self.loading_package: - data = None - for key, block in self.structure.blocks.items(): - if var_name in block.data_structures: - if block.name not in self.blocks: - self.blocks[block.name] = MFBlock( - self.simulation_data, - self.dimensions, - block, - self.path + (key,), - self.model_or_sim, - self, - ) - dataset_struct = block.data_structures[var_name] - var_path = self.path + (key, var_name) - ds = self.blocks[block.name].add_dataset( - dataset_struct, data, var_path - ) - self._data_list.append(ds) - return ds - - message = 'Unable to find variable "{}" in package ' '"{}".'.format( - var_name, self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "building data objects", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - def set_model_relative_path(self, model_ws): - """Sets the model path relative to the simulation's path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. 
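-
-        Notes
-        -----
-        The new path is applied to every block in this package and then
-        propagated to any sub-packages.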
- - """ - # update blocks - for key, block in self.blocks.items(): - block.set_model_relative_path(model_ws) - # update sub-packages - for package in self._package_container.packagelist: - package.set_model_relative_path(model_ws) - - def set_all_data_external( - self, - check_data=True, - external_data_folder=None, - base_name=None, - binary=False, - ): - """Sets the package's list and array data to be stored externally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - external_data_folder - Folder where external data will be stored - base_name: str - Base file name prefix for all files - binary: bool - Whether file will be stored as binary - """ - # set blocks - for key, block in self.blocks.items(): - file_name = os.path.split(self.filename)[1] - if base_name is not None: - file_name = f"{base_name}_{file_name}" - block.set_all_data_external( - file_name, - check_data, - external_data_folder, - binary, - ) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_external( - check_data, - external_data_folder, - base_name, - binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the package's list and array data to be stored internally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - - """ - # set blocks - for key, block in self.blocks.items(): - block.set_all_data_internal(check_data) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_internal(check_data) - - def load(self, strict=True): - """Loads the package from file. - - Parameters - ---------- - strict : bool - Enforce strict checking of data. - - Returns - ------- - success : bool - - """ - # open file - try: - fd_input_file = open( - datautil.clean_filename(self.get_file_path()), "r" - ) - except OSError as e: - if e.errno == errno.ENOENT: - message = "File {} of type {} could not be opened.".format( - self.get_file_path(), self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self.package_name, - self.path, - "loading package file", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - try: - self._load_blocks(fd_input_file, strict) - except ReadAsArraysException as err: - fd_input_file.close() - raise ReadAsArraysException(err) - # close file - fd_input_file.close() - - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # return validity of file - return self.is_valid() - - def is_valid(self): - """Returns whether or not this package is valid. 
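-
-        A package is valid when every required (non-optional) block that
-        is currently allowed is enabled, and every enabled block is
-        itself valid.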
- - Returns - ------- - is valid : bool - - """ - # Check blocks - for block in self.blocks.values(): - # Non-optional blocks must be enabled - if ( - block.structure.number_non_optional_data() > 0 - and not block.enabled - and block.is_allowed() - ): - self.last_error = ( - f'Required block "{block.block_header.name}" not enabled' - ) - return False - # Enabled blocks must be valid - if block.enabled and not block.is_valid: - self.last_error = f'Invalid block "{block.block_header.name}"' - return False - - return True - - def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): - # init - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( - MFComment("", self.path, self.simulation_data) - ) - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - blocks_read = 0 - found_first_block = False - line = " " - while line != "": - line = fd_input_file.readline() - clean_line = line.strip() - # If comment or empty line - if MFComment.is_comment(clean_line, True): - self._store_comment(line, found_first_block) - elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": - # parse block header - try: - block_header_info = self._get_block_header_info( - line, self.path - ) - except MFDataException as mfde: - message = ( - "An error occurred while loading block header " - 'in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "loading block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - mfde, - ) - - # if there is more than one possible block with the same name, - # resolve the correct block to use - block_key = block_header_info.name.lower() - block_num = 1 - possible_key = f"{block_header_info.name.lower()}-{block_num}" - if possible_key in self.blocks: - block_key = possible_key - block_header_name = block_header_info.name.lower() - while ( - block_key in self.blocks - and not self.blocks[block_key].is_allowed() - ): - block_key = f"{block_header_name}-{block_num}" - block_num += 1 - - if block_key not in self.blocks: - # block name not recognized, load block as comments and - # issue a warning - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" is not a valid block ' - "name for file type " - "{}.".format(block_key, self.package_type) - ) - print(warning_str) - self._store_comment(line, found_first_block) - while line != "": - line = fd_input_file.readline() - self._store_comment(line, found_first_block) - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line and ( - len(arr_line[0]) <= 2 - or arr_line[0][:3].upper() == "END" - ): - break - else: - found_first_block = True - skip_block = False - cur_block = self.blocks[block_key] - if cur_block.loaded: - # Only blocks defined as repeating are allowed to have - # multiple entries - header_name = block_header_info.name - if not self.structure.blocks[ - header_name.lower() - ].repeating(): - # warn and skip block - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" has ' - "multiple entries and is not " - "intended to be a repeating " - "block ({} package" - ")".format(header_name, self.package_type) - ) - print(warning_str) - skip_block = True - bhs = cur_block.structure.block_header_structure - bhval = block_header_info.variable_strings - if 
( - len(bhs) > 0 - and len(bhval) > 0 - and bhs[0].name == "iper" - ): - nper = self.simulation_data.mfdata[ - ("tdis", "dimensions", "nper") - ].get_data() - bhval_int = datautil.DatumUtil.is_int(bhval[0]) - if not bhval_int or int(bhval[0]) > nper: - # skip block when block stress period is greater - # than nper - skip_block = True - - if not skip_block: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading block {cur_block.structure.name}..." - ) - # reset comments - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - cur_block.load( - block_header_info, fd_input_file, strict - ) - - # write post block comment - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - blocks_read += 1 - if blocks_read >= max_blocks: - break - else: - # treat skipped block as if it is all comments - arr_line = datautil.PyListUtil.split_data_line( - clean_line - ) - self.post_block_comments.add_text(str(line), True) - while arr_line and ( - len(line) <= 2 or arr_line[0][:3].upper() != "END" - ): - line = fd_input_file.readline() - arr_line = datautil.PyListUtil.split_data_line( - line.strip() - ) - if arr_line: - self.post_block_comments.add_text( - str(line), True - ) - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - else: - if not ( - len(clean_line) == 0 - or (len(line) > 2 and line[:3].upper() == "END") - ): - # Record file location of beginning of unresolved text - # treat unresolved text as a comment for now - self._store_comment(line, found_first_block) - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes the package to a file. - - Parameters - ---------- - ext_file_action : ExtFileAction - How to handle pathing of external data files. - """ - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # create any folders in path - package_file_path = self.get_file_path() - package_folder = os.path.split(package_file_path)[0] - if package_folder and not os.path.isdir(package_folder): - os.makedirs(os.path.split(package_file_path)[0]) - - # open file - fd = open(package_file_path, "w") - - # write flopy header - if self.simulation_data.write_headers: - dt = datetime.datetime.now() - header = ( - "# File generated by Flopy version {} on {} at {}." - "\n".format( - __version__, - dt.strftime("%m/%d/%Y"), - dt.strftime("%H:%M:%S"), - ) - ) - fd.write(header) - - # write blocks - self._write_blocks(fd, ext_file_action) - - fd.close() - - def create_package_dimensions(self): - """Creates a package dimensions object. For internal FloPy library - use. - - Returns - ------- - package dimensions : PackageDimensions - - """ - model_dims = None - if self.container_type[0] == PackageContainerType.model: - model_dims = [ - modeldimensions.ModelDimensions( - self.path[0], self.simulation_data - ) - ] - else: - # this is a simulation file that does not correspond to a specific - # model. 
-            # figure out which model to use and return a dimensions
-            # object for that model
-            if self.dfn_file_name[0:3] == "exg":
-                exchange_rec_array = self.simulation_data.mfdata[
-                    ("nam", "exchanges", "exchanges")
-                ].get_data()
-                if exchange_rec_array is None:
-                    return None
-                for exchange in exchange_rec_array:
-                    if exchange[1].lower() == self._filename.lower():
-                        model_dims = [
-                            modeldimensions.ModelDimensions(
-                                exchange[2], self.simulation_data
-                            ),
-                            modeldimensions.ModelDimensions(
-                                exchange[3], self.simulation_data
-                            ),
-                        ]
-                        break
-            elif (
-                self.dfn_file_name[4:7] == "gnc"
-                and self.model_or_sim.type == "Simulation"
-            ):
-                # get the exchange file name associated with the gnc package
-                if self.parent_file is not None:
-                    exg_file_name = self.parent_file.filename
-                else:
-                    raise Exception(
-                        "Cannot create a simulation-level "
-                        "gnc file without a corresponding "
-                        "exchange file. The exchange file must be "
-                        "created first."
-                    )
-                # get the models associated with the exchange file from the
-                # simulation name file
-                try:
-                    exchange_recarray_data = (
-                        self.model_or_sim.name_file.exchanges.get_data()
-                    )
-                except MFDataException as mfde:
-                    message = (
-                        "An error occurred while retrieving exchange "
-                        "data from the simulation name file. The error "
-                        "occurred while processing gnc file "
-                        f'"{self.filename}".'
-                    )
-                    raise MFDataException(
-                        mfdata_except=mfde,
-                        package=self._get_pname(),
-                        message=message,
-                    )
-                assert exchange_recarray_data is not None
-                model_1 = None
-                model_2 = None
-                for exchange in exchange_recarray_data:
-                    if exchange[1] == exg_file_name:
-                        model_1 = exchange[2]
-                        model_2 = exchange[3]
-
-                # assign models to the gnc package
-                model_dims = [
-                    modeldimensions.ModelDimensions(
-                        model_1, self.simulation_data
-                    ),
-                    modeldimensions.ModelDimensions(
-                        model_2, self.simulation_data
-                    ),
-                ]
-            elif self.parent_file is not None:
-                model_dims = []
-                for md in self.parent_file.dimensions.model_dim:
-                    model_name = md.model_name
-                    model_dims.append(
-                        modeldimensions.ModelDimensions(
-                            model_name, self.simulation_data
-                        )
-                    )
-            else:
-                model_dims = [
-                    modeldimensions.ModelDimensions(None, self.simulation_data)
-                ]
-        return modeldimensions.PackageDimensions(
-            model_dims, self.structure, self.path
-        )
-
-    def _store_comment(self, line, found_first_block):
-        # Store comment
-        if found_first_block:
-            self.post_block_comments.text += line
-        else:
-            self.simulation_data.mfdata[
-                self.path + ("pkg_hdr_comments",)
-            ].text += line
-
-    def _write_blocks(self, fd, ext_file_action):
-        # verify that all blocks are valid
-        if not self.is_valid():
-            message = (
-                'Unable to write out model file "{}" due to the '
-                "following error: "
-                "{} ({})".format(self._filename, self.last_error, self.path)
-            )
-            type_, value_, traceback_ = sys.exc_info()
-            raise MFDataException(
-                self.model_name,
-                self._get_pname(),
-                self.path,
-                "writing package blocks",
-                None,
-                inspect.stack()[0][3],
-                type_,
-                value_,
-                traceback_,
-                message,
-                self.simulation_data.debug,
-            )
-
-        # write initial comments
-        pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",)
-        if pkg_hdr_comments_path in self.simulation_data.mfdata:
-            self.simulation_data.mfdata[
-                self.path + ("pkg_hdr_comments",)
-            ].write(fd, False)
-
-        # loop through blocks
-        block_num = 1
-        for block in self.blocks.values():
-            if (
-                self.simulation_data.verbosity_level.value
-                >= VerbosityLevel.verbose.value
-            ):
-                print(f"      writing block {block.structure.name}...")
-            # write block
-            block.write(fd, ext_file_action=ext_file_action)
-            block_num += 1
-
-    def get_file_path(self):
"""Returns the package file's path. - - Returns - ------- - file path : str - """ - if self.path[0] in self.simulation_data.mfpath.model_relative_path: - return os.path.join( - self.simulation_data.mfpath.get_model_path(self.path[0]), - self._filename, - ) - else: - return os.path.join( - self.simulation_data.mfpath.get_sim_path(), self._filename - ) - - def export(self, f, **kwargs): - """ - Method to export a package to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - Filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - User supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from .. import export - - return export.utils.package_export(f, self, **kwargs) - - def plot(self, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate - file names for output image files. Plots will be exported as - image files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only - used if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - - """ - from ..plot.plotutil import PlotUtilities - - if not self.plottable: - raise TypeError("Simulation level packages are not plottable") - - axes = PlotUtilities._plot_package_helper(self, **kwargs) - return axes - - -class MFChildPackages: - """ - Behind the scenes code for creating an interface to access child packages - from a parent package. This class is automatically constructed by the - FloPy library and is for internal library use only. 
-
-
-class MFChildPackages:
-    """
-    Behind-the-scenes code for creating an interface to access child
-    packages from a parent package. This class is automatically constructed
-    by the FloPy library and is for internal library use only.
-
-    Parameters
-    ----------
-    """
-
-    def __init__(
-        self,
-        model_or_sim,
-        parent,
-        pkg_type,
-        filerecord,
-        package=None,
-        package_class=None,
-    ):
-        self._packages = []
-        self._filerecord = filerecord
-        if package is not None:
-            self._packages.append(package)
-        self._model_or_sim = model_or_sim
-        self._cpparent = parent
-        self._pkg_type = pkg_type
-        self._package_class = package_class
-
-    def __init_subclass__(cls):
-        """Register package"""
-        super().__init_subclass__()
-        PackageContainer.packages_by_abbr[cls.package_abbr] = cls
-
-    def __getattr__(self, attr):
-        if (
-            "_packages" in self.__dict__
-            and len(self._packages) > 0
-            and hasattr(self._packages[0], attr)
-        ):
-            item = getattr(self._packages[0], attr)
-            return item
-        raise AttributeError(attr)
-
-    def __getitem__(self, k):
-        if isinstance(k, int):
-            if k < len(self._packages):
-                return self._packages[k]
-        raise ValueError(f"Package index {k} does not exist.")
-
-    def __setattr__(self, key, value):
-        if (
-            key != "_packages"
-            and key != "_model_or_sim"
-            and key != "_cpparent"
-            and key != "_inattr"
-            and key != "_filerecord"
-            and key != "_package_class"
-            and key != "_pkg_type"
-        ):
-            if len(self._packages) == 0:
-                raise Exception(
-                    "No {} package is currently attached to package"
-                    " {}. Use the initialize method to create a(n) "
-                    "{} package before attempting to access its "
-                    "properties.".format(
-                        self._pkg_type, self._cpparent.filename, self._pkg_type
-                    )
-                )
-            package = self._packages[0]
-            setattr(package, key, value)
-            return
-        super().__setattr__(key, value)
-
-    def __default_file_path_base(self, file_path, suffix=""):
-        stem = os.path.split(file_path)[1]
-        stem_lst = stem.split(".")
-        file_name = ".".join(stem_lst[:-1])
-        if len(stem_lst) > 1:
-            file_ext = stem_lst[-1]
-            return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}"
-        elif suffix != "":
-            return f"{stem}.{suffix}.{self._pkg_type}"
-        else:
-            return f"{stem}.{self._pkg_type}"
-
-    def __file_path_taken(self, possible_path):
-        for package in self._packages:
-            # Do a case-insensitive compare
-            if package.filename.lower() == possible_path.lower():
-                return True
-        return False
-
-    def next_default_file_path(self):
-        possible_path = self.__default_file_path_base(self._cpparent.filename)
-        suffix = 0
-        while self.__file_path_taken(possible_path):
-            possible_path = self.__default_file_path_base(
-                self._cpparent.filename, suffix
-            )
-            suffix += 1
-        return possible_path
-
-    def init_package(self, package, fname, remove_packages=True):
-        if remove_packages:
-            # clear out existing packages
-            self._remove_packages()
-        elif fname is not None:
-            self._remove_packages(fname)
-        if fname is None:
-            # build a file name
-            fname = self.next_default_file_path()
-        package._filename = fname
-        # check the file record variable
-        found = False
-        fr_data = self._filerecord.get_data()
-        if fr_data is not None:
-            for line in fr_data:
-                if line[0] == fname:
-                    found = True
-        if not found:
-            # append file record variable
-            self._filerecord.append_data([(fname,)])
-        # add the package to the list
-        self._packages.append(package)
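init_package is normally reached indirectly: the generated child-package wrappers call it from their initialize methods, which also keep the parent's file record in sync. A sketch of the user-facing side, assuming gwf is a loaded model whose GHB package has an observation child package (the package and file names are illustrative):

    # attach an observation child package to an existing GHB package
    ghb = gwf.get_package("ghb")
    ghb.obs.initialize(
        filename="gwf.ghb.obs",
        continuous={"gwf.ghb.obs.csv": [("ghb_obs_1", "GHB", (0, 0, 0))]},
    )
    print(ghb.obs.filename)  # now recorded in the parent's obs file record
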
-
-    def _update_filename(self, old_fname, new_fname):
-        file_record = self._filerecord.get_data()
-        new_file_record_data = []
-        if file_record is not None:
-            file_record_data = file_record[0]
-            for item in file_record_data:
-                base, fname = os.path.split(item)
-                if fname.lower() == old_fname.lower():
-                    if base:
-                        new_file_record_data.append(
-                            (os.path.join(base, new_fname),)
-                        )
-                    else:
-                        new_file_record_data.append((new_fname,))
-                else:
-                    new_file_record_data.append((item,))
-        else:
-            new_file_record_data.append((new_fname,))
-        self._filerecord.set_data(new_file_record_data)
-
-    def _append_package(self, package, fname, update_frecord=True):
-        if fname is None:
-            # build a file name
-            fname = self.next_default_file_path()
-        package._filename = fname
-
-        if update_frecord:
-            # set file record variable
-            file_record = self._filerecord.get_data()
-            file_record_data = file_record
-            new_file_record_data = []
-            for item in file_record_data:
-                new_file_record_data.append((item[0],))
-            new_file_record_data.append((fname,))
-            self._filerecord.set_data(new_file_record_data)
-
-        for existing_pkg in self._packages:
-            if existing_pkg is package:
-                # do not add the same package twice
-                return
-        # add the package to the list
-        self._packages.append(package)
-
-    def _remove_packages(self, fname=None, only_pop_from_list=False):
-        rp_list = []
-        for idx, package in enumerate(self._packages):
-            if fname is None or package.filename == fname:
-                if not only_pop_from_list:
-                    self._model_or_sim.remove_package(package)
-                rp_list.append(idx)
-        for idx in reversed(rp_list):
-            self._packages.pop(idx)
diff --git a/flopy/mf6/tmp/mfstructure.py b/flopy/mf6/tmp/mfstructure.py
deleted file mode 100644
index 92cd0be3b6..0000000000
--- a/flopy/mf6/tmp/mfstructure.py
+++ /dev/null
@@ -1,2113 +0,0 @@
-"""
-mfstructure module. Contains classes related to package structure
-
-
-"""
-
-import ast
-import keyword
-import os
-from enum import Enum
-from textwrap import TextWrapper
-
-import numpy as np
-
-from ..mfbase import StructException
-
-numeric_index_text = (
-    "This argument is an index variable, which means that "
-    "it should be treated as zero-based when working with "
-    "FloPy and Python. Flopy will automatically subtract "
-    "one when loading index variables and add one when "
-    "writing index variables."
-)
-
-
-class DfnType(Enum):
-    common = 1
-    sim_name_file = 2
-    sim_tdis_file = 3
-    ims_file = 4
-    exch_file = 5
-    model_name_file = 6
-    model_file = 7
-    gnc_file = 8
-    mvr_file = 9
-    utl = 10
-    mvt_file = 11
-    unknown = 999
-
-
-class Dfn:
-    """
-    Base class for package file definitions
-
-    Attributes
-    ----------
-    dfndir : path
-        folder containing package definition files (dfn)
-    common : path
-        file containing common information
-
-    Methods
-    -------
-    get_file_list : () : list
-        returns all of the dfn files found in dfndir.
files are returned in - a specified order defined in the local variable file_order - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self): - # directories - self.dfndir = os.path.join(".", "dfn") - self.common = os.path.join(self.dfndir, "common.dfn") - - def get_file_list(self): - file_order = [ - "sim-nam", # dfn completed tex updated - "sim-tdis", # dfn completed tex updated - "exg-gwfgwf", # dfn completed tex updated - "sln-ims", # dfn completed tex updated - "gwf-nam", # dfn completed tex updated - "gwf-dis", # dfn completed tex updated - "gwf-disv", # dfn completed tex updated - "gwf-disu", # dfn completed tex updated - "gwf-ic", # dfn completed tex updated - "gwf-npf", # dfn completed tex updated - "gwf-sto", # dfn completed tex updated - "gwf-hfb", # dfn completed tex updated - "gwf-chd", # dfn completed tex updated - "gwf-wel", # dfn completed tex updated - "gwf-drn", # dfn completed tex updated - "gwf-riv", # dfn completed tex updated - "gwf-ghb", # dfn completed tex updated - "gwf-rch", # dfn completed tex updated - "gwf-rcha", # dfn completed tex updated - "gwf-evt", # dfn completed tex updated - "gwf-evta", # dfn completed tex updated - "gwf-maw", # dfn completed tex updated - "gwf-sfr", # dfn completed tex updated - "gwf-lak", # dfn completed tex updated - "gwf-uzf", # dfn completed tex updated - "gwf-mvr", # dfn completed tex updated - "gwf-gnc", # dfn completed tex updated - "gwf-oc", # dfn completed tex updated - "utl-obs", - "utl-ts", - "utl-tab", - "utl-tas", - ] - - dfn_path, tail = os.path.split(os.path.realpath(__file__)) - dfn_path = os.path.join(dfn_path, "dfn") - # construct list of dfn files to process in the order of file_order - files = os.listdir(dfn_path) - for f in files: - if "common" in f or "flopy" in f: - continue - package_abbr = os.path.splitext(f)[0] - if package_abbr not in file_order: - file_order.append(package_abbr) - return [f"{fname}.dfn" for fname in file_order if f"{fname}.dfn" in files] - - def _file_type(self, file_name): - # determine file type - if len(file_name) >= 6 and file_name[0:6] == "common": - return DfnType.common, None - elif file_name[0:3] == "sim": - if file_name[3:6] == "nam": - return DfnType.sim_name_file, None - elif file_name[3:7] == "tdis": - return DfnType.sim_tdis_file, None - else: - return DfnType.unknown, None - elif file_name[0:3] == "nam": - return DfnType.sim_name_file, None - elif file_name[0:4] == "tdis": - return DfnType.sim_tdis_file, None - elif file_name[0:3] == "sln" or file_name[0:3] == "ims": - return DfnType.ims_file, None - elif file_name[0:3] == "exg": - return DfnType.exch_file, file_name[3:6] - elif file_name[0:3] == "utl": - return DfnType.utl, None - else: - model_type = file_name[0:3] - if file_name[3:6] == "nam": - return DfnType.model_name_file, model_type - elif file_name[3:6] == "gnc": - return DfnType.gnc_file, model_type - elif file_name[3:6] == "mvr": - return DfnType.mvr_file, model_type - elif file_name[3:6] == "mvt": - return DfnType.mvt_file, model_type - else: - return DfnType.model_file, model_type - - -class DfnPackage(Dfn): - """ - Dfn child class that loads dfn information from a list structure stored - in the auto-built package classes - - Attributes - ---------- - package : MFPackage - MFPackage subclass that contains dfn information - - Methods - ------- - get_block_structure_dict : (path : tuple, common : bool, model_file : - bool) : dict - returns a dictionary of block structure information for the package - - See Also - -------- - - 
Notes - ----- - - Examples - -------- - """ - - def __init__(self, package): - super().__init__() - self.package = package - self.package_type = package._package_type - self.dfn_file_name = package.dfn_file_name - # the package type is always the text after the last - - package_name = self.package_type.split("-") - self.package_type = package_name[-1] - if not isinstance(package_name, str) and len(package_name) > 1: - self.package_prefix = "".join(package_name[:-1]) - else: - self.package_prefix = "" - self.dfn_type, self.model_type = self._file_type( - self.dfn_file_name.replace("-", "") - ) - self.dfn_list = package.dfn - - def get_block_structure_dict(self, path, common, model_file, block_parent): - block_dict = {} - dataset_items_in_block = {} - self.dataset_items_needed_dict = {} - keystring_items_needed_dict = {} - current_block = None - - # get header dict - header_dict = {} - for item in self.dfn_list[0]: - if isinstance(item, str): - if item == "multi-package": - header_dict["multi-package"] = True - if item.startswith("package-type"): - header_dict["package-type"] = item.split(" ")[1] - for dfn_entry in self.dfn_list[1:]: - # load next data item - new_data_item_struct = MFDataItemStructure() - for next_line in dfn_entry: - new_data_item_struct.set_value(next_line, common) - # if block does not exist - if ( - current_block is None - or current_block.name != new_data_item_struct.block_name - ): - # create block - current_block = MFBlockStructure( - new_data_item_struct.block_name, - path, - model_file, - block_parent, - ) - # put block in block_dict - block_dict[current_block.name] = current_block - # init dataset item lookup - self.dataset_items_needed_dict = {} - dataset_items_in_block = {} - - # resolve block type - if len(current_block.block_header_structure) > 0: - if ( - len(current_block.block_header_structure[0].data_item_structures) - > 0 - and current_block.block_header_structure[0] - .data_item_structures[0] - .type - == DatumType.integer - ): - block_type = BlockType.transient - else: - block_type = BlockType.multiple - else: - block_type = BlockType.single - - if new_data_item_struct.block_variable: - block_dataset_struct = MFDataStructure( - new_data_item_struct, - model_file, - self.package_type, - self.dfn_list, - ) - block_dataset_struct.parent_block = current_block - self._process_needed_data_items( - block_dataset_struct, dataset_items_in_block - ) - block_dataset_struct.set_path(path + (new_data_item_struct.block_name,)) - block_dataset_struct.add_item(new_data_item_struct) - current_block.add_dataset(block_dataset_struct) - else: - new_data_item_struct.block_type = block_type - dataset_items_in_block[new_data_item_struct.name] = new_data_item_struct - - # if data item belongs to existing dataset(s) - item_location_found = False - if new_data_item_struct.name in self.dataset_items_needed_dict: - if new_data_item_struct.type == DatumType.record: - # record within a record - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, - current_block, - dataset_items_in_block, - path, - model_file, - False, - ) - new_data_item_struct.record_within_record = True - - for dataset in self.dataset_items_needed_dict[ - new_data_item_struct.name - ]: - item_added = dataset.add_item(new_data_item_struct, record=True) - item_location_found = item_location_found or item_added - # if data item belongs to an existing keystring - if new_data_item_struct.name in keystring_items_needed_dict: - new_data_item_struct.set_path( 
- keystring_items_needed_dict[new_data_item_struct.name].path - ) - if new_data_item_struct.type == DatumType.record: - # record within a keystring - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, - current_block, - dataset_items_in_block, - path, - model_file, - False, - ) - keystring_items_needed_dict[ - new_data_item_struct.name - ].keystring_dict[new_data_item_struct.name] = new_data_item_struct - item_location_found = True - - if new_data_item_struct.type == DatumType.keystring: - # add keystrings to search list - for ( - key, - val, - ) in new_data_item_struct.keystring_dict.items(): - keystring_items_needed_dict[key] = new_data_item_struct - - # if data set does not exist - if not item_location_found: - self._new_dataset( - new_data_item_struct, - current_block, - dataset_items_in_block, - path, - model_file, - True, - ) - if ( - current_block.name.upper() == "SOLUTIONGROUP" - and len(current_block.block_header_structure) == 0 - ): - # solution_group a special case for now - block_data_item_struct = MFDataItemStructure() - block_data_item_struct.name = "order_num" - block_data_item_struct.data_items = ["order_num"] - block_data_item_struct.type = DatumType.integer - block_data_item_struct.longname = "order_num" - block_data_item_struct.description = ( - "internal variable to keep track of solution group number" - ) - block_dataset_struct = MFDataStructure( - block_data_item_struct, - model_file, - self.package_type, - self.dfn_list, - ) - block_dataset_struct.parent_block = current_block - block_dataset_struct.set_path( - path + (new_data_item_struct.block_name,) - ) - block_dataset_struct.add_item(block_data_item_struct) - current_block.add_dataset(block_dataset_struct) - return block_dict, header_dict - - def _new_dataset( - self, - new_data_item_struct, - current_block, - dataset_items_in_block, - path, - model_file, - add_to_block=True, - ): - current_dataset_struct = MFDataStructure( - new_data_item_struct, model_file, self.package_type, self.dfn_list - ) - current_dataset_struct.set_path(path + (new_data_item_struct.block_name,)) - self._process_needed_data_items(current_dataset_struct, dataset_items_in_block) - if add_to_block: - # add dataset - current_block.add_dataset(current_dataset_struct) - current_dataset_struct.parent_block = current_block - current_dataset_struct.add_item(new_data_item_struct) - return current_dataset_struct - - def _process_needed_data_items( - self, current_dataset_struct, dataset_items_in_block - ): - # add data items needed to dictionary - for ( - item_name, - val, - ) in current_dataset_struct.expected_data_items.items(): - if item_name in dataset_items_in_block: - current_dataset_struct.add_item(dataset_items_in_block[item_name]) - else: - if item_name in self.dataset_items_needed_dict: - self.dataset_items_needed_dict[item_name].append( - current_dataset_struct - ) - else: - self.dataset_items_needed_dict[item_name] = [current_dataset_struct] - - -class DataType(Enum): - """ - Types of data that can be found in a package file - """ - - scalar_keyword = 1 - scalar = 2 - array = 3 - array_transient = 4 - list = 5 - list_transient = 6 - list_multiple = 7 - scalar_transient = 8 - scalar_keyword_transient = 9 - - -class DatumType(Enum): - """ - Types of individual pieces of data - """ - - keyword = 1 - integer = 2 - double_precision = 3 - string = 4 - constant = 5 - list_defined = 6 - keystring = 7 - record = 8 - repeating_record = 9 - recarray = 10 - - -class BlockType(Enum): - 
""" - Types of blocks that can be found in a package file - """ - - single = 1 - multiple = 2 - transient = 3 - - -class MFDataItemStructure: - """ - Defines the structure of a single MF6 data item in a dfn file - - Attributes - ---------- - block_name : str - name of block that data item is in - name : str - name of data item - name_list : list - list of alternate names for the data item, includes data item's main - name "name" - python_name : str - name of data item referenced in python, with illegal python characters - removed - type : str - type of the data item as it appears in the dfn file - type_obj : python type - type of the data item as a python type - valid_values : list - list of valid values for the data item. if empty, this constraint does - not apply - data_items : list - list of data items contained in this data_item, including itself - in_record : bool - in_record attribute as appears in dfn file - tagged : bool - whether data item is tagged. if the data item is tagged its name is - included in the MF6 input file - just_data : bool - when just_data is true only data appears in the MF6 input file. - otherwise, name information appears - shape : list - describes the shape of the data - layer_dims : list - which dimensions in the shape function as layers, if None defaults to - "layer" - reader : basestring - reader that MF6 uses to read the data - optional : bool - whether data item is optional or required as part of the MFData in the - MF6 input file - longname : str - long name of the data item - description : str - description of the data item - path : tuple - a tuple describing the data item's location within the simulation - (,,,) - repeating : bool - whether or not the data item can repeat in the MF6 input file - block_variable : bool - if true, this data item is part of the block header - block_type : BlockType - whether the block containing this item is a single non-repeating block, - a multiple repeating block, or a transient repeating block - keystring_dict : dict - dictionary containing acceptable keystrings if this data item is of - type keystring - is_cellid : bool - true if this data item is definitely of type cellid - possible_cellid : bool - true if this data item may be of type cellid - ucase : bool - this data item must be displayed in upper case in the MF6 input file - - Methods - ------- - remove_cellid : (resolved_shape : list, cellid_size : int) - removes the cellid size from the shape of a data item - set_path : (path : tuple) - sets the path to this data item to path - get_rec_type : () : object type - gets the type of object of this data item to be used in a numpy - recarray - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self): - self.file_name_keywords = {"filein": False, "fileout": False} - self.file_name_key_seq = {"fname": True} - self.contained_keywords = {"fname": True, "file": True, "tdis6": True} - self.block_name = None - self.name = None - self.display_name = None - self.name_length = None - self.is_aux = False - self.is_boundname = False - self.is_mname = False - self.name_list = [] - self.python_name = None - self.type = None - self.type_string = None - self.type_obj = None - self.valid_values = [] - self.data_items = None - self.in_record = False - self.tagged = True - self.just_data = False - self.shape = [] - self.layer_dims = ["nlay"] - self.reader = None - self.optional = False - self.longname = None - self.description = "" - self.path = None - self.repeating = False - self.block_variable = 
False - self.block_type = BlockType.single - self.keystring_dict = {} - self.is_cellid = False - self.possible_cellid = False - self.ucase = False - self.preserve_case = False - self.default_value = None - self.numeric_index = False - self.support_negative_index = False - self.construct_package = None - self.construct_data = None - self.parameter_name = None - self.one_per_pkg = False - self.jagged_array = None - self.netcdf = False - - def set_value(self, line, common): - arr_line = line.strip().split() - if len(arr_line) > 1: - if arr_line[0] == "block": - self.block_name = " ".join(arr_line[1:]) - elif arr_line[0] == "name": - if self.type == DatumType.keyword: - # display keyword names in upper case - self.display_name = " ".join(arr_line[1:]).upper() - else: - self.display_name = " ".join(arr_line[1:]).lower() - self.name = " ".join(arr_line[1:]).lower() - self.name_list.append(self.name) - if len(self.name) >= 6 and self.name[0:6] == "cellid": - self.is_cellid = True - if ( - self.name - and self.name[0:2] == "id" - and self.type == DatumType.string - ): - self.possible_cellid = True - self.python_name = self.name.replace("-", "_").lower() - # don't allow name to be a python keyword - if keyword.iskeyword(self.name): - self.python_name = f"{self.python_name}_" - # performance optimizations - if self.name == "aux": - self.is_aux = True - if self.name == "boundname": - self.is_boundname = True - if self.name[0:5] == "mname": - self.is_mname = True - self.name_length = len(self.name) - elif arr_line[0] == "other_names": - arr_names = " ".join(arr_line[1:]).lower().split(",") - for name in arr_names: - self.name_list.append(name) - elif arr_line[0] == "type": - if self.support_negative_index: - # type already automatically set when - # support_negative_index flag is set - return - type_line = arr_line[1:] - if len(type_line) <= 0: - raise StructException( - 'Data structure "{}" does not have a type specified.'.format( - self.name - ), - self.path, - ) - self.type_string = type_line[0].lower() - self.type = self._str_to_enum_type(type_line[0]) - if ( - self.name - and self.name[0:2] == "id" - and self.type == DatumType.string - ): - self.possible_cellid = True - if ( - self.type == DatumType.recarray - or self.type == DatumType.record - or self.type == DatumType.repeating_record - or self.type == DatumType.keystring - ): - self.data_items = type_line[1:] - if self.type == DatumType.keystring: - for item in self.data_items: - self.keystring_dict[item.lower()] = 0 - else: - self.data_items = [self.name] - self.type_obj = self._get_type() - if self.type == DatumType.keyword: - # display keyword names in upper case - if self.display_name is not None: - self.display_name = self.display_name.upper() - elif arr_line[0] == "valid": - for value in arr_line[1:]: - self.valid_values.append(value) - elif arr_line[0] == "in_record": - self.in_record = self._get_boolean_val(arr_line) - elif arr_line[0] == "tagged": - self.tagged = self._get_boolean_val(arr_line) - elif arr_line[0] == "just_data": - self.just_data = self._get_boolean_val(arr_line) - elif arr_line[0] == "shape": - if len(arr_line) > 1: - self.shape = [] - for dimension in arr_line[1:]: - if dimension[-1] != ";": - dimension = dimension.replace("(", "") - dimension = dimension.replace(")", "") - dimension = dimension.replace(",", "") - if dimension[0] == "*": - dimension = dimension.replace("*", "") - # set as a "layer" dimension - self.layer_dims.insert(0, dimension) - self.shape.append(dimension) - else: - # only process what is after 
the last ; which by - # convention is the most generalized form of the - # shape - self.shape = [] - if len(self.shape) > 0: - self.repeating = True - elif arr_line[0] == "reader": - self.reader = " ".join(arr_line[1:]) - elif arr_line[0] == "optional": - self.optional = self._get_boolean_val(arr_line) - elif arr_line[0] == "longname": - self.longname = " ".join(arr_line[1:]) - elif arr_line[0] == "description": - if arr_line[1] == "REPLACE": - self.description = self._resolve_common(arr_line, common) - elif len(arr_line) > 1 and arr_line[1].strip(): - self.description = " ".join(arr_line[1:]) - - # clean self.description - replace_pairs = [ - ("``", '"'), # double quotes - ("''", '"'), - ("`", "'"), # single quotes - ("~", " "), # non-breaking space - (r"\mf", "MODFLOW 6"), - (r"\citep{konikow2009}", "(Konikow et al., 2009)"), - (r"\citep{hill1990preconditioned}", "(Hill, 1990)"), - (r"\ref{table:ftype}", "in mf6io.pdf"), - (r"\ref{table:gwf-obstypetable}", "in mf6io.pdf"), - ] - for s1, s2 in replace_pairs: - if s1 in self.description: - self.description = self.description.replace(s1, s2) - - # massage latex equations - self.description = self.description.replace("$<$", "<") - self.description = self.description.replace("$>$", ">") - if "$" in self.description: - descsplit = self.description.split("$") - mylist = [ - i.replace("\\", "") + ":math:`" + j.replace("\\", "\\\\") + "`" - for i, j in zip(descsplit[::2], descsplit[1::2]) - ] - mylist.append(descsplit[-1].replace("\\", "")) - self.description = "".join(mylist) - else: - self.description = self.description.replace("\\", "") - elif arr_line[0] == "block_variable": - if len(arr_line) > 1: - self.block_variable = bool(arr_line[1]) - elif arr_line[0] == "ucase": - if len(arr_line) > 1: - self.ucase = bool(arr_line[1]) - elif arr_line[0] == "preserve_case": - self.preserve_case = self._get_boolean_val(arr_line) - elif arr_line[0] == "default_value": - self.default_value = " ".join(arr_line[1:]) - elif arr_line[0] == "numeric_index": - self.numeric_index = self._get_boolean_val(arr_line) - elif arr_line[0] == "support_negative_index": - self.support_negative_index = self._get_boolean_val(arr_line) - # must be double precision to support 0 and -0 - self.type_string = "double_precision" - self.type = self._str_to_enum_type(self.type_string) - self.type_obj = self._get_type() - elif arr_line[0] == "construct_package": - self.construct_package = arr_line[1] - elif arr_line[0] == "construct_data": - self.construct_data = arr_line[1] - elif arr_line[0] == "parameter_name": - self.parameter_name = arr_line[1] - elif arr_line[0] == "one_per_pkg": - self.one_per_pkg = bool(arr_line[1]) - elif arr_line[0] == "jagged_array": - self.jagged_array = arr_line[1] - elif arr_line[0] == "netcdf": - self.netcdf = arr_line[1] - - def get_type_string(self): - return f"[{self.type_string}]" - - def get_description(self, line_size, initial_indent, level_indent): - item_desc = f"* {self.name} ({self.type_string}) {self.description}" - if self.numeric_index or self.is_cellid: - # append zero-based index text - item_desc = f"{item_desc} {numeric_index_text}" - twr = TextWrapper( - width=line_size, - initial_indent=initial_indent, - drop_whitespace=True, - subsequent_indent=f" {initial_indent}", - ) - item_desc = "\n".join(twr.wrap(item_desc)) - return item_desc - - def get_doc_string(self, line_size, initial_indent, level_indent): - description = self.get_description( - line_size, initial_indent + level_indent, level_indent - ) - param_doc_string = 
f"{self.python_name} : {self.get_type_string()}" - twr = TextWrapper( - width=line_size, - initial_indent=initial_indent, - subsequent_indent=f" {initial_indent}", - drop_whitespace=True, - ) - param_doc_string = "\n".join(twr.wrap(param_doc_string)) - param_doc_string = f"{param_doc_string}\n{description}" - return param_doc_string - - def get_keystring_desc(self, line_size, initial_indent, level_indent): - if self.type != DatumType.keystring: - raise StructException( - f'Can not get keystring description for "{self.name}" ' - "because it is not a keystring", - self.path, - ) - - # get description of keystring elements - description = "" - for key, item in self.keystring_dict.items(): - if description: - description = f"{description}\n" - description = "{}{}".format( - description, - item.get_doc_string(line_size, initial_indent, level_indent), - ) - return description - - def file_nam_in_nam_file(self): - for key, item in self.contained_keywords.items(): - if self.name.lower().find(key) != -1: - return True - - def indicates_file_name(self): - if self.name.lower() in self.file_name_keywords: - return True - for key in self.file_name_key_seq.keys(): - if key in self.name.lower(): - return True - return False - - def is_file_name(self): - if ( - self.name.lower() in self.file_name_keywords - and self.file_name_keywords[self.name.lower()] is True - ): - return True - for key, item in self.contained_keywords.items(): - if self.name.lower().find(key) != -1 and item is True: - return True - return False - - @staticmethod - def remove_cellid(resolved_shape, cellid_size): - # remove the cellid size from the shape - for dimension, index in zip(resolved_shape, range(0, len(resolved_shape))): - if dimension == cellid_size: - resolved_shape[index] = 1 - break - - @staticmethod - def _get_boolean_val(bool_option_line): - if len(bool_option_line) <= 1: - return False - if bool_option_line[1].lower() == "true": - return True - return False - - @staticmethod - def _find_close_bracket(arr_line): - for index, word in enumerate(arr_line): - word = word.strip() - if len(word) > 0 and word[-1] == "}": - return index - return None - - @staticmethod - def _resolve_common(arr_line, common): - if common is None: - return arr_line - if not (arr_line[2] in common and len(arr_line) >= 4): - raise StructException(f'Could not find line "{arr_line}" in common dfn.') - close_bracket_loc = MFDataItemStructure._find_close_bracket(arr_line[2:]) - resolved_str = common[arr_line[2]] - if close_bracket_loc is None: - find_replace_str = " ".join(arr_line[3:]) - else: - close_bracket_loc += 3 - find_replace_str = " ".join(arr_line[3:close_bracket_loc]) - find_replace_dict = ast.literal_eval(find_replace_str) - for find_str, replace_str in find_replace_dict.items(): - resolved_str = resolved_str.replace(find_str, replace_str) - # clean up formatting - resolved_str = resolved_str.replace("\\texttt", "") - resolved_str = resolved_str.replace("{", "") - resolved_str = resolved_str.replace("}", "") - - return resolved_str - - def set_path(self, path): - self.path = path + (self.name,) - mfstruct = MFStructure() - for dimension in self.shape: - dim_path = path + (dimension,) - if dim_path in mfstruct.dimension_dict: - mfstruct.dimension_dict[dim_path].append(self) - else: - mfstruct.dimension_dict[dim_path] = [self] - - def _get_type(self): - if self.type == DatumType.double_precision: - return float - elif self.type == DatumType.integer: - return int - elif self.type == DatumType.constant: - return bool - elif self.type == 
DatumType.string: - return str - elif self.type == DatumType.list_defined: - return str - return str - - def _str_to_enum_type(self, type_string): - if type_string.lower() == "keyword": - return DatumType.keyword - elif type_string.lower() == "integer": - return DatumType.integer - elif ( - type_string.lower() == "double_precision" or type_string.lower() == "double" - ): - return DatumType.double_precision - elif type_string.lower() == "string": - return DatumType.string - elif type_string.lower() == "constant": - return DatumType.constant - elif type_string.lower() == "list-defined": - return DatumType.list_defined - elif type_string.lower() == "keystring": - return DatumType.keystring - elif type_string.lower() == "record": - return DatumType.record - elif type_string.lower() == "recarray": - return DatumType.recarray - elif type_string.lower() == "repeating_record": - return DatumType.repeating_record - else: - exc_text = f'Data item type "{type_string}" not supported.' - raise StructException(exc_text, self.path) - - def get_rec_type(self): - item_type = self.type_obj - if item_type == str or self.is_cellid or self.possible_cellid: - return object - return item_type - - -class MFDataStructure: - """ - Defines the structure of a single MF6 data item in a dfn file - - Parameters - ---------- - data_item : MFDataItemStructure - base data item associated with this data structure - model_data : bool - whether or not this is part of a model - package_type : str - abbreviated package type - - Attributes - ---------- - type : str - type of the data as it appears in the dfn file - path : tuple - a tuple describing the data's location within the simulation - (,,,) - optional : bool - whether data is optional or required as part of the MFBlock in the MF6 - input file - name : str - name of data item - name_list : list - list of alternate names for the data, includes data item's main name - "name" - python_name : str - name of data referenced in python, with illegal python characters - removed - longname : str - long name of the data - repeating : bool - whether or not the data can repeat in the MF6 input file - layered : bool - whether this data can appear by layer - num_data_items : int - number of data item structures contained in this MFDataStructure, - including itself - record_within_record : bool - true if this MFDataStructure is a record within a container - MFDataStructure - file_data : bool - true if data points to a file - block_type : BlockType - whether the block containing this data is a single non-repeating block, - a multiple repeating block, or a transient repeating block - block_variable : bool - if true, this data is part of the block header - model_data : bool - if true, data is part of a model - num_optional : int - number of optional data items - parent_block : MFBlockStructure - parent block structure object - data_item_structures : list - list of data item structures contained in this MFDataStructure - expected_data_items : dict - dictionary of expected data item names for quick lookup - shape : tuple - shape of first data item - - Methods - ------- - get_keywords : () : list - returns a list of all keywords associated with this data - supports_aux : () : bool - returns true of this data supports aux variables - add_item : (item : MFDataItemStructure, record : bool) - adds a data item to this MFDataStructure - set_path : (path : tuple) - sets the path describing the data's location within the simulation - (,,,) - get_datatype : () : DataType - returns the DataType of this 
data (array, list, scalar, ...) - get_min_record_entries : () : int - gets the minimum number of entries, as entered in a package file, - for a single record. excludes optional data items - get_record_size : () : int - gets the number of data items, excluding keyword data items, in this - MFDataStructure - all_keywords : () : bool - returns true of all data items are keywords - get_type_string : () : str - returns descriptive string of the data types in this MFDataStructure - get_description : () : str - returns a description of the data - get_type_array : (type_array : list): - builds an array of data type information in type_array - get_datum_type : (numpy_type : bool): - returns the object type of the first data item in this MFDataStructure - with a standard type. if numpy_type is true returns the type as a - numpy type - get_data_item_types: () : list - returns a list of object type for every data item in this - MFDataStructure - first_non_keyword_index : () : int - return the index of the first data item in this MFDataStructure that is - not a keyword - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self, data_item, model_data, package_type, dfn_list): - self.type = data_item.type - self.package_type = package_type - self.path = None - self.optional = data_item.optional - self.name = data_item.name - self.block_name = data_item.block_name - self.name_length = len(self.name) - self.is_aux = data_item.is_aux - self.is_boundname = data_item.is_boundname - self.name_list = data_item.name_list - self.python_name = data_item.python_name - self.longname = data_item.longname - self.default_value = data_item.default_value - self.repeating = False - self.layered = ( - "nlay" in data_item.shape - or "nodes" in data_item.shape - or len(data_item.layer_dims) > 1 - ) - self.netcdf = data_item.netcdf - self.num_data_items = len(data_item.data_items) - self.record_within_record = False - self.file_data = False - self.nam_file_data = False - self.block_type = data_item.block_type - self.block_variable = data_item.block_variable - self.model_data = model_data - self.num_optional = 0 - self.parent_block = None - self._fpmerge_data_item(data_item, dfn_list) - self.construct_package = data_item.construct_package - self.construct_data = data_item.construct_data - self.parameter_name = data_item.parameter_name - self.one_per_pkg = data_item.one_per_pkg - - self.data_item_structures = [] - self.expected_data_items = {} - self.shape = data_item.shape - if ( - self.type == DatumType.recarray - or self.type == DatumType.record - or self.type == DatumType.repeating_record - ): - # record expected data for later error checking - for data_item_name in data_item.data_items: - self.expected_data_items[data_item_name] = len(self.expected_data_items) - else: - self.expected_data_items[data_item.name] = len(self.expected_data_items) - - @property - def basic_item(self): - if not self.parent_block.parent_package.stress_package: - return False - for item in self.data_item_structures: - if ( - ( - (item.repeating or item.optional) - and not (item.is_cellid or item.is_aux or item.is_boundname) - ) - or item.jagged_array is not None - or item.type == DatumType.keystring - or item.type == DatumType.keyword - or ( - item.description is not None - and "keyword `NONE'" in item.description - ) - ): - return False - return True - - @property - def is_mname(self): - for item in self.data_item_structures: - if item.is_mname: - return True - return False - - def get_item(self, item_name): - 
for item in self.data_item_structures: - if item.name.lower() == item_name.lower(): - return item - return None - - def get_keywords(self): - keywords = [] - if ( - self.type == DatumType.recarray - or self.type == DatumType.record - or self.type == DatumType.repeating_record - ): - for data_item_struct in self.data_item_structures: - if data_item_struct.type == DatumType.keyword: - if len(keywords) == 0: - # create first keyword tuple - for name in data_item_struct.name_list: - keywords.append((name,)) - else: - # update all keyword tuples with latest keyword found - new_keywords = [] - for keyword_tuple in keywords: - for name in data_item_struct.name_list: - new_keywords.append(keyword_tuple + (name,)) - if data_item_struct.optional: - keywords = keywords + new_keywords - else: - keywords = new_keywords - elif data_item_struct.type == DatumType.keystring: - for keyword_item in data_item_struct.data_items: - keywords.append((keyword_item,)) - elif len(keywords) == 0: - if len(data_item_struct.valid_values) > 0: - new_keywords = [] - # loop through all valid values and append to the end - # of each keyword tuple - for valid_value in data_item_struct.valid_values: - if len(keywords) == 0: - new_keywords.append((valid_value,)) - else: - for keyword_tuple in keywords: - new_keywords.append(keyword_tuple + (valid_value,)) - keywords = new_keywords - else: - for name in data_item_struct.name_list: - keywords.append((name,)) - else: - for name in self.name_list: - keywords.append((name,)) - return keywords - - def supports_aux(self): - for data_item_struct in self.data_item_structures: - if data_item_struct.name.lower() == "aux": - return True - return False - - def add_item(self, item, record=False, dfn_list=None): - item_added = False - if item.type != DatumType.recarray and ( - (item.type != DatumType.record and item.type != DatumType.repeating_record) - or record is True - ): - if item.name not in self.expected_data_items: - raise StructException( - 'Could not find data item "{}" in ' - "expected data items of data structure " - "{}.".format(item.name, self.name), - self.path, - ) - item.set_path(self.path) - if len(self.data_item_structures) == 0: - self.keyword = item.name - # insert data item into correct location in array - location = self.expected_data_items[item.name] - if len(self.data_item_structures) > location: - # TODO: ask about this condition and remove - if self.data_item_structures[location] is None: - # verify that this is not a placeholder value - if self.data_item_structures[location] is not None: - raise StructException( - 'Data structure "{}" already ' - 'has the item named "{}"' - ".".format(self.name, item.name), - self.path, - ) - if isinstance(item, MFDataItemStructure): - self.nam_file_data = ( - self.nam_file_data or item.file_nam_in_nam_file() - ) - self.file_data = self.file_data or item.indicates_file_name() - # replace placeholder value - self.data_item_structures[location] = item - item_added = True - else: - for index in range(0, location - len(self.data_item_structures)): - # insert placeholder in array - self.data_item_structures.append(None) - if isinstance(item, MFDataItemStructure): - self.nam_file_data = ( - self.nam_file_data or item.file_nam_in_nam_file() - ) - self.file_data = self.file_data or item.indicates_file_name() - self.data_item_structures.append(item) - item_added = True - self.optional = self.optional and item.optional - if item.optional: - self.num_optional += 1 - if item_added: - self._fpmerge_data_item(item, dfn_list) - return 
item_added - - def _fpmerge_data_item(self, item, dfn_list): - mfstruct = MFStructure() - # check for flopy-specific dfn data - if item.name.lower() in mfstruct.flopy_dict: - # read flopy-specific dfn data - for name, value in mfstruct.flopy_dict[item.name.lower()].items(): - line = f"{name} {value}" - item.set_value(line, None) - if dfn_list is not None: - dfn_list[-1].append(line) - - def set_path(self, path): - self.path = path + (self.name,) - - def get_datatype(self): - if self.type == DatumType.recarray: - if self.block_type != BlockType.single and not self.block_variable: - if self.block_type == BlockType.transient: - return DataType.list_transient - else: - return DataType.list_multiple - else: - return DataType.list - if self.type == DatumType.record or self.type == DatumType.repeating_record: - record_size, repeating_data_item = self.get_record_size() - if (record_size >= 1 and not self.all_keywords()) or repeating_data_item: - if self.block_type != BlockType.single and not self.block_variable: - if self.block_type == BlockType.transient: - return DataType.list_transient - else: - return DataType.list_multiple - else: - return DataType.list - else: - if self.block_type != BlockType.single and not self.block_variable: - return DataType.scalar_transient - else: - return DataType.scalar - elif ( - len(self.data_item_structures) > 0 - and self.data_item_structures[0].repeating - ): - if self.data_item_structures[0].type == DatumType.string: - return DataType.list - else: - if self.block_type == BlockType.single: - return DataType.array - else: - return DataType.array_transient - elif ( - len(self.data_item_structures) > 0 - and self.data_item_structures[0].type == DatumType.keyword - ): - if self.block_type != BlockType.single and not self.block_variable: - return DataType.scalar_keyword_transient - else: - return DataType.scalar_keyword - else: - if self.block_type != BlockType.single and not self.block_variable: - return DataType.scalar_transient - else: - return DataType.scalar - - def is_mult_or_trans(self): - data_type = self.get_datatype() - if ( - data_type == DataType.scalar_keyword_transient - or data_type == DataType.array_transient - or data_type == DataType.list_transient - or data_type == DataType.list_multiple - ): - return True - return False - - def get_min_record_entries(self): - count = 0 - for data_item_structure in self.data_item_structures: - if not data_item_structure.optional: - if data_item_structure.type == DatumType.record: - count += data_item_structure.get_record_size()[0] - else: - if data_item_structure.type != DatumType.keyword: - count += 1 - return count - - def get_record_size(self): - count = 0 - repeating = False - for data_item_structure in self.data_item_structures: - if data_item_structure.type == DatumType.record: - count += data_item_structure.get_record_size()[0] - else: - if data_item_structure.type != DatumType.keyword or count > 0: - if data_item_structure.repeating: - # count repeats as one extra record - repeating = True - count += 1 - return count, repeating - - def all_keywords(self): - for data_item_structure in self.data_item_structures: - if data_item_structure.type == DatumType.record: - if not data_item_structure.all_keywords(): - return False - else: - if data_item_structure.type != DatumType.keyword: - return False - return True - - def get_type_string(self): - type_array = [] - self.get_docstring_type_array(type_array) - type_string = ", ".join(type_array) - type_header = "" - type_footer = "" - if 
len(self.data_item_structures) > 1 or self.data_item_structures[0].repeating: - type_header = "[" - type_footer = "]" - if self.repeating: - type_footer = f"] ... [{type_string}]" - - return f"{type_header}{type_string}{type_footer}" - - def get_docstring_type_array(self, type_array): - for index, item in enumerate(self.data_item_structures): - if item.type == DatumType.record: - item.get_docstring_type_array(type_array) - else: - if self.display_item(index): - if ( - self.type == DatumType.recarray - or self.type == DatumType.record - or self.type == DatumType.repeating_record - ): - type_array.append(str(item.name)) - else: - type_array.append(str(self._resolve_item_type(item))) - - def get_description( - self, line_size=79, initial_indent=" ", level_indent=" " - ): - type_array = [] - self.get_type_array(type_array) - description = "" - for datastr, index, itype in type_array: - item = datastr.data_item_structures[index] - if item is None: - continue - if item.type == DatumType.record: - item_desc = item.get_description( - line_size, initial_indent + level_indent, level_indent - ) - description = f"{description}\n{item_desc}" - elif datastr.display_item(index): - if len(description.strip()) > 0: - description = f"{description}\n" - item_desc = item.description - if item.numeric_index or item.is_cellid: - # append zero-based index text - item_desc = f"{item_desc} {numeric_index_text}" - - item_desc = f"* {item.name} ({itype}) {item_desc}" - twr = TextWrapper( - width=line_size, - initial_indent=initial_indent, - subsequent_indent=f" {initial_indent}", - ) - item_desc = "\n".join(twr.wrap(item_desc)) - description = f"{description}{item_desc}" - if item.type == DatumType.keystring: - keystr_desc = item.get_keystring_desc( - line_size, initial_indent + level_indent, level_indent - ) - description = f"{description}\n{keystr_desc}" - return description - - def get_subpackage_description( - self, line_size=79, initial_indent=" ", level_indent=" " - ): - item_desc = ( - "* Contains data for the {} package. Data can be " - "stored in a dictionary containing data for the {} " - "package with variable names as keys and package data as " - "values. Data just for the {} variable is also " - "acceptable. 
See {} package documentation for more " - "information" - ".".format( - self.construct_package, - self.construct_package, - self.parameter_name, - self.construct_package, - ) - ) - twr = TextWrapper( - width=line_size, - initial_indent=initial_indent, - subsequent_indent=f" {initial_indent}", - ) - return "\n".join(twr.wrap(item_desc)) - - def get_doc_string(self, line_size=79, initial_indent=" ", level_indent=" "): - if self.parameter_name is not None: - description = self.get_subpackage_description( - line_size, initial_indent + level_indent, level_indent - ) - var_name = self.parameter_name - type_name = f"{{varname:data}} or {self.construct_data} data" - else: - description = self.get_description( - line_size, initial_indent + level_indent, level_indent - ) - var_name = self.python_name - type_name = self.get_type_string() - - param_doc_string = f"{var_name} : {type_name}" - twr = TextWrapper( - width=line_size, - initial_indent=initial_indent, - subsequent_indent=f" {initial_indent}", - ) - param_doc_string = "\n".join(twr.wrap(param_doc_string)) - param_doc_string = f"{param_doc_string}\n{description}" - return param_doc_string - - def get_type_array(self, type_array): - for index, item in enumerate(self.data_item_structures): - if item.type == DatumType.record: - item.get_type_array(type_array) - else: - if self.display_item(index): - type_array.append( - ( - self, - index, - str(self._resolve_item_type(item)), - ) - ) - - def _resolve_item_type(self, item): - item_type = item.type_string - first_nk_idx = self.first_non_keyword_index() - # single keyword is type boolean - if item_type == "keyword" and len(self.data_item_structures) == 1: - item_type = "boolean" - if item.is_cellid: - item_type = "(integer, ...)" - # two keywords - if len(self.data_item_structures) == 2 and first_nk_idx is None: - # keyword type is string - item_type = "string" - return item_type - - def display_item(self, item_num): - item = self.data_item_structures[item_num] - first_nk_idx = self.first_non_keyword_index() - # all keywords excluded if there is a non-keyword - if not (item.type == DatumType.keyword and first_nk_idx is not None): - # ignore first keyword if there are two keywords - if ( - len(self.data_item_structures) == 2 - and first_nk_idx is None - and item_num == 0 - ): - return False - return True - return False - - def get_datum_type(self, numpy_type=False, return_enum_type=False): - data_item_types = self.get_data_item_types() - for var_type in data_item_types: - if ( - var_type[0] == DatumType.double_precision - or var_type[0] == DatumType.integer - or var_type[0] == DatumType.string - ): - if return_enum_type: - return var_type[0] - else: - if numpy_type: - if var_type[0] == DatumType.double_precision: - return np.float64 - elif var_type[0] == DatumType.integer: - return np.int32 - else: - return object - else: - return var_type[2] - return None - - def get_data_item_types(self): - data_item_types = [] - for data_item in self.data_item_structures: - if data_item.type == DatumType.record: - # record within a record - data_item_types += data_item.get_data_item_types() - else: - data_item_types.append( - [data_item.type, data_item.type_string, data_item.type_obj] - ) - return data_item_types - - def first_non_keyword_index(self): - for data_item, index in zip( - self.data_item_structures, range(0, len(self.data_item_structures)) - ): - if data_item.type != DatumType.keyword: - return index - return None - - def get_model(self): - if self.model_data: - if len(self.path) >= 1: - return 
self.path[0]
-        return None
-
-    def get_package(self):
-        if self.model_data:
-            if len(self.path) >= 2:
-                return self.path[1]
-        else:
-            if len(self.path) >= 1:
-                return self.path[0]
-        return ""
-
-
-class MFBlockStructure:
-    """
-    Defines the structure of an MF6 block.
-
-    Parameters
-    ----------
-    name : string
-        block name
-    path : tuple
-        tuple that describes the location of the block within the simulation
-        (<model>, <package>, <block>)
-    model_block : bool
-        true if this block is part of a model
-
-    Attributes
-    ----------
-    name : string
-        block name
-    path : tuple
-        tuple that describes the location of the block within the simulation
-        (<model>, <package>, <block>)
-    model_block : bool
-        true if this block is part of a model
-    data_structures : dict
-        dictionary of data items in this block, with the data item name as
-        the key
-    block_header_structure : list
-        list of data items that are part of this block's "header"
-
-    Methods
-    -------
-    repeating() : bool
-        Returns true if more than one instance of this block can appear in
-        an MF6 package file
-    add_dataset(dataset : MFDataStructure, block_header_dataset : bool)
-        Adds dataset to this block, as a header dataset if
-        block_header_dataset is true
-    number_non_optional_data() : int
-        Returns the number of non-optional non-header data structures in
-        this block
-    number_non_optional_block_header_data() : int
-        Returns the number of non-optional block header data structures in
-        this block
-    get_data_structure(path : tuple) : MFDataStructure
-        Returns the data structure in this block with the name defined by
-        path[0]. If the name does not exist, returns None.
-    get_all_recarrays() : list
-        Returns all non-header data structures in this block that are of
-        type recarray
-
-    See Also
-    --------
-
-    Notes
-    -----
-
-    Examples
-    --------
-    """
-
-    def __init__(self, name, path, model_block, parent_package):
-        # initialize
-        self.data_structures = {}
-        self.block_header_structure = []
-        self.name = name
-        self.path = path + (self.name,)
-        self.model_block = model_block
-        self.parent_package = parent_package
-
-    def repeating(self):
-        if len(self.block_header_structure) > 0:
-            return True
-        return False
-
-    def add_dataset(self, dataset):
-        dataset.set_path(self.path)
-        if dataset.block_variable:
-            self.block_header_structure.append(dataset)
-        else:
-            self.data_structures[dataset.name] = dataset
-
-    def number_non_optional_data(self):
-        num = 0
-        for key, data_structure in self.data_structures.items():
-            if not data_structure.optional:
-                num += 1
-        return num
-
-    def number_non_optional_block_header_data(self):
-        if (
-            len(self.block_header_structure) > 0
-            and not self.block_header_structure[0].optional
-        ):
-            return 1
-        else:
-            return 0
-
-    def get_data_structure(self, path):
-        if path[0] in self.data_structures:
-            return self.data_structures[path[0]]
-        else:
-            return None
-
-    def get_all_recarrays(self):
-        recarray_list = []
-        for ds_key, item in self.data_structures.items():
-            if item.type == DatumType.recarray:
-                recarray_list.append(item)
-        return recarray_list
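Because every loaded MFPackage keeps the structure parsed from its dfn on a structure attribute, the block-level queries above are easy to reach from user code. A small introspection sketch; pkg stands for any loaded package object, and apart from the two methods defined above the attribute names are assumptions about the library internals:

    for name, block in pkg.structure.blocks.items():
        kind = "repeating" if block.repeating() else "single"
        print(f"{name}: {kind} block, "
              f"{block.number_non_optional_data()} required variables")
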
-
-
-    Parameters
-    ----------
-    dfn_file : string
-        the definition file used to define the structure of this input file
-    path : tuple
-        path defining the location of the container of this input file
-        structure within the overall simulation structure
-    common : bool
-        is this the common dfn file
-    model_file : bool
-        this file belongs to a specific model type
-
-    Attributes
-    ----------
-    valid : bool
-        simulation structure validity
-    path : tuple
-        path defining the location of this input file structure within the
-        overall simulation structure
-    read_as_arrays : bool
-        if this input file structure is the READASARRAYS version of a package
-
-    Methods
-    -------
-    is_valid() : bool
-        Checks all structure objects within the file for validity
-    get_data_structure(path : string)
-        Returns a data structure if it exists, otherwise returns None. Data
-        structure type returned is based on the tuple/list "path"
-
-    See Also
-    --------
-
-    Notes
-    -----
-
-    Examples
-    --------
-
-    """
-
-    def __init__(self, dfn_file, path, common, model_file):
-        # initialize
-        self.file_type = dfn_file.package_type
-        self.file_prefix = dfn_file.package_prefix
-        self.dfn_type = dfn_file.dfn_type
-        self.dfn_file_name = dfn_file.dfn_file_name
-        self.description = ""
-        self.path = path + (self.file_type,)
-        self.model_file = model_file  # file belongs to a specific model
-        self.read_as_arrays = False
-
-        self.blocks, self.header = dfn_file.get_block_structure_dict(
-            self.path,
-            common,
-            model_file,
-            self,
-        )
-        self.has_packagedata = "packagedata" in self.blocks
-        self.has_perioddata = "period" in self.blocks
-        self.multi_package_support = "multi-package" in self.header
-        self.stress_package = (
-            "package-type" in self.header
-            and self.header["package-type"] == "stress-package"
-        )
-        self.advanced_stress_package = (
-            "package-type" in self.header
-            and self.header["package-type"] == "advanced-stress-package"
-        )
-        self.dfn_list = dfn_file.dfn_list
-        self.sub_package = self._sub_package()
-
-    def advanced_package(self):
-        return self.has_packagedata and self.has_perioddata
-
-    def _sub_package(self):
-        mfstruct = MFStructure()
-        for value in mfstruct.flopy_dict.values():
-            if value is not None and "construct_package" in value:
-                if self.file_type == value["construct_package"]:
-                    return True
-        return False
-
-    def is_valid(self):
-        valid = True
-        for block in self.blocks:
-            valid = valid and block.is_valid()
-        return valid
-
-    def get_data_structure(self, path):
-        if isinstance(path, tuple) or isinstance(path, list):
-            if path[0] in self.blocks:
-                return self.blocks[path[0]].get_data_structure(path[1:])
-            else:
-                return None
-        else:
-            for block in self.blocks:
-                if path in block.data_structures:
-                    return block.data_structures[path]
-            return None
-
-
-class MFModelStructure:
-    """
-    Defines the structure of a MF6 model and its packages
-
-    Parameters
-    ----------
-    model_type : string
-        abbreviation of model type
-
-    Attributes
-    ----------
-    valid : bool
-        simulation structure validity
-    name_file_struct_obj : MFInputFileStructure
-        describes the structure of the model name file
-    package_struct_objs : dict
-        describes the structure of the model packages
-    model_type : string
-        abbreviation of the model type
-
-    Methods
-    -------
-    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
-        Adds a namefile structure object to the model
-    add_package(dfn_file : DfnFile, model_file=True : bool)
-        Adds a package structure object to the model
-    is_valid() : bool
-        Checks all structure objects within the model for validity
-    get_data_structure(path : string)
-        Returns a data structure if it exists, otherwise returns None. Data
-        structure type returned is based on the tuple/list "path"
-
-    See Also
-    --------
-
-    Notes
-    -----
-
-    Examples
-    --------
-    """
-
-    def __init__(self, model_type, utl_struct_objs):
-        # add name file structure
-        self.model_type = model_type
-        self.name_file_struct_obj = None
-        self.package_struct_objs = {}
-        self.utl_struct_objs = utl_struct_objs
-
-    def add_namefile(self, dfn_file, common):
-        self.name_file_struct_obj = MFInputFileStructure(
-            dfn_file, (self.model_type,), common, True
-        )
-
-    def add_package(self, dfn_file, common):
-        self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure(
-            dfn_file, (self.model_type,), common, True
-        )
-
-    def get_package_struct(self, package_type):
-        if package_type in self.package_struct_objs:
-            return self.package_struct_objs[package_type]
-        elif package_type in self.utl_struct_objs:
-            return self.utl_struct_objs[package_type]
-        else:
-            return None
-
-    def is_valid(self):
-        valid = True
-        for package_struct in self.package_struct_objs:
-            valid = valid and package_struct.is_valid()
-        return valid
-
-    def get_data_structure(self, path):
-        if path[0] in self.package_struct_objs:
-            if len(path) > 1:
-                return self.package_struct_objs[path[0]].get_data_structure(path[1:])
-            else:
-                return self.package_struct_objs[path[0]]
-        elif path[0] == "nam":
-            if len(path) > 1:
-                return self.name_file_struct_obj.get_data_structure(path[1:])
-            else:
-                return self.name_file_struct_obj
-        else:
-            return None
-
-
-class MFSimulationStructure:
-    """
-    Defines the structure of a MF6 simulation and its packages
-    and models.
-
-    Parameters
-    ----------
-
-    Attributes
-    ----------
-    name_file_struct_obj : MFInputFileStructure
-        describes the structure of the simulation name file
-    package_struct_objs : dict
-        describes the structure of the simulation packages
-    model_struct_objs : dict
-        describes the structure of the supported model types
-    utl_struct_objs : dict
-        describes the structure of the supported utility packages
-    common : dict
-        common file information
-    model_type : string
-        placeholder
-
-    Methods
-    -------
-    process_dfn : (dfn_file : DfnFile)
-        reads in the contents of a dfn file, storing that contents in the
-        appropriate object
-    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
-        Adds a namefile structure object to the simulation
-    add_util : (dfn_file : DfnFile)
-        Adds a utility package structure object to the simulation
-    add_package(dfn_file : DfnFile, model_file=True : bool)
-        Adds a package structure object to the simulation
-    store_common(dfn_file : DfnFile)
-        Stores the contents of the common dfn file
-    add_model(model_type : string)
-        Adds a model structure object to the simulation
-    is_valid() : bool
-        Checks all structure objects within the simulation for validity
-    get_data_structure(path : string)
-        Returns a data structure if it exists, otherwise returns None. Data
-        structure type returned is based on the tuple/list "path"
-    tag_read_as_arrays
-        Searches through all packages and tags any packages with a name that
-        indicates they are the READASARRAYS version of a package.
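
These structure classes are easiest to understand from the consumer side. The sketch below (not part of this patch) walks the dfn-derived hierarchy through the MFStructure singleton; the "gwf6" model key follows the model-type-plus-version convention used by add_model() above, and "dis"/"griddata"/"delr" are assumed example names.

from flopy.mf6.data.mfstructure import MFStructure

mfstruct = MFStructure()  # singleton; dfn structure is loaded once
sim_struct = mfstruct.sim_struct

# model-level package structure, keyed by model type + version string
dis_struct = sim_struct.model_struct_objs["gwf6"].get_package_struct("dis")
print(dis_struct.read_as_arrays)  # False for dis; set by tag_read_as_arrays()

# drill down to a single dataset by (block, variable) path
delr_struct = dis_struct.get_data_structure(("griddata", "delr"))
print(delr_struct.name)
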
- - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self): - # initialize - self.name_file_struct_obj = None - self.package_struct_objs = {} - self.utl_struct_objs = {} - self.model_struct_objs = {} - self.common = None - self.model_type = "" - - @property - def model_types(self): - model_type_list = [] - for model in self.model_struct_objs.values(): - model_type_list.append(model.model_type[:-1]) - return model_type_list - - def process_dfn(self, dfn_file): - if dfn_file.dfn_type == DfnType.common: - self.store_common(dfn_file) - elif dfn_file.dfn_type == DfnType.sim_name_file: - self.add_namefile(dfn_file, False) - elif ( - dfn_file.dfn_type == DfnType.sim_tdis_file - or dfn_file.dfn_type == DfnType.exch_file - or dfn_file.dfn_type == DfnType.ims_file - ): - self.add_package(dfn_file, False) - elif dfn_file.dfn_type == DfnType.utl: - self.add_util(dfn_file) - elif ( - dfn_file.dfn_type == DfnType.model_file - or dfn_file.dfn_type == DfnType.model_name_file - or dfn_file.dfn_type == DfnType.gnc_file - or dfn_file.dfn_type == DfnType.mvr_file - or dfn_file.dfn_type == DfnType.mvt_file - ): - model_ver = f"{dfn_file.model_type}{MFStructure().get_version_string()}" - if model_ver not in self.model_struct_objs: - self.add_model(model_ver) - if dfn_file.dfn_type == DfnType.model_file: - self.model_struct_objs[model_ver].add_package(dfn_file, self.common) - elif ( - dfn_file.dfn_type == DfnType.gnc_file - or dfn_file.dfn_type == DfnType.mvr_file - or dfn_file.dfn_type == DfnType.mvt_file - ): - # gnc and mvr files belong both on the simulation and model - # level - self.model_struct_objs[model_ver].add_package(dfn_file, self.common) - self.add_package(dfn_file, False) - else: - self.model_struct_objs[model_ver].add_namefile(dfn_file, self.common) - - def add_namefile(self, dfn_file, model_file=True): - self.name_file_struct_obj = MFInputFileStructure( - dfn_file, (), self.common, model_file - ) - - def add_util(self, dfn_file): - self.utl_struct_objs[dfn_file.package_type] = MFInputFileStructure( - dfn_file, (), self.common, True - ) - - def add_package(self, dfn_file, model_file=True): - self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure( - dfn_file, (), self.common, model_file - ) - - def store_common(self, dfn_file): - # store common stuff - self.common = dfn_file.dict_by_name() - - def add_model(self, model_type): - self.model_struct_objs[model_type] = MFModelStructure( - model_type, self.utl_struct_objs - ) - - def is_valid(self): - valid = True - for package_struct in self.package_struct_objs: - valid = valid and package_struct.is_valid() - for model_struct in self.model_struct_objs: - valid = valid and model_struct.is_valid() - return valid - - def get_data_structure(self, path): - if path[0] in self.package_struct_objs: - if len(path) > 1: - return self.package_struct_objs[path[0]].get_data_structure(path[1:]) - else: - return self.package_struct_objs[path[0]] - elif path[0] in self.model_struct_objs: - if len(path) > 1: - return self.model_struct_objs[path[0]].get_data_structure(path[1:]) - else: - return self.model_struct_objs[path[0]] - elif path[0] in self.utl_struct_objs: - if len(path) > 1: - return self.utl_struct_objs[path[0]].get_data_structure(path[1:]) - else: - return self.utl_struct_objs[path[0]] - elif path[0] == "nam": - if len(path) > 1: - return self.name_file_struct_obj.get_data_structure(path[1:]) - else: - return self.name_file_struct_obj - else: - return None - - def tag_read_as_arrays(self): - for key, 
package_struct in self.package_struct_objs.items(): - if ( - package_struct.get_data_structure(("options", "readasarrays")) - or package_struct.get_data_structure(("options", "readarraylayer")) - or package_struct.get_data_structure(("options", "readarraygrid")) - ): - package_struct.read_as_arrays = True - for model_key, model_struct in self.model_struct_objs.items(): - for ( - key, - package_struct, - ) in model_struct.package_struct_objs.items(): - if ( - package_struct.get_data_structure(("options", "readasarrays")) - or package_struct.get_data_structure(("options", "readarraylayer")) - or package_struct.get_data_structure(("options", "readarraygrid")) - ): - package_struct.read_as_arrays = True - - -class MFStructure: - """ - Singleton class for accessing the contents of the json structure file - (only one instance of this class can exist, which loads the json file on - initialization) - - Parameters - ---------- - mf_version : int - version of MODFLOW - valid : bool - whether the structure information loaded from the dfn files is valid - sim_struct : MFSimulationStructure - Object containing file structure for all simulation files - dimension_dict : dict - Dictionary mapping paths to dimension information to the dataitem whose - dimension information is being described - """ - - _instance = None - - def __new__(cls): - if cls._instance is None: - cls._instance = super().__new__(cls) - - # Initialize variables - cls._instance.mf_version = 6 - cls._instance.sim_struct = None - cls._instance.dimension_dict = {} - cls._instance.flopy_dict = {} - - # Read metadata from file - cls._instance.valid = cls._instance._load_structure() - - return cls._instance - - def get_version_string(self): - return format(str(self.mf_version)) - - def _load_structure(self): - # set up structure classes - self.sim_struct = MFSimulationStructure() - - # initialize flopy dict keys - MFStructure().flopy_dict["solution_packages"] = {} - - from ..mfpackage import MFPackage - - for package in MFPackage.__subclasses__(): - # process header - for entry in package.dfn[0][1:]: - if isinstance(entry, list) and entry[0] == "solution_package": - MFStructure().flopy_dict["solution_packages"][ - package.package_abbr - ] = entry[1:] - # process each package - self.sim_struct.process_dfn(DfnPackage(package)) - self.sim_struct.tag_read_as_arrays() - - return True diff --git a/flopy/mf6/tmp/ruff/2/mfmodel.py b/flopy/mf6/tmp/ruff/2/mfmodel.py deleted file mode 100644 index 45c31b722b..0000000000 --- a/flopy/mf6/tmp/ruff/2/mfmodel.py +++ /dev/null @@ -1,2256 +0,0 @@ -import inspect -import os -import sys -import warnings -from typing import Optional, Union - -import numpy as np - -from ..discretization.grid import Grid -from ..discretization.modeltime import ModelTime -from ..discretization.structuredgrid import StructuredGrid -from ..discretization.unstructuredgrid import UnstructuredGrid -from ..discretization.vertexgrid import VertexGrid -from ..mbase import ModelInterface -from ..utils import datautil -from ..utils.check import mf6check -from .coordinates import modeldimensions -from .data import mfdata, mfdatalist, mfstructure -from .data.mfdatautil import DataSearchOutput, iterable -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .mfpackage import MFPackage -from .utils.mfenums import DiscretizationType -from .utils.output_util import MF6Output - - -class MFModel(ModelInterface): - """ - 
MODFLOW-6 model base class. Represents a single model in a simulation. - - Parameters - ---------- - simulation_data : MFSimulationData - Simulation data object of the simulation this model will belong to - structure : MFModelStructure - Structure of this type of model - modelname : str - Name of the model - model_nam_file : str - Relative path to the model name file from model working folder - version : str - Version of modflow - exe_name : str - Model executable name - model_ws : str - Model working folder path - disfile : str - Relative path to dis file from model working folder - grid_type : str - Type of grid the model will use (structured, unstructured, vertices) - verbose : bool - Verbose setting for model operations (default False) - - Attributes - ---------- - name : str - Name of the model - exe_name : str - Model executable name - packages : dict of MFPackage - Dictionary of model packages - - """ - - def __init__( - self, - simulation, - model_type="gwf6", - modelname="model", - model_nam_file=None, - version="mf6", - exe_name="mf6", - add_to_simulation=True, - structure=None, - model_rel_path=".", - verbose=False, - **kwargs, - ): - self._package_container = PackageContainer(simulation.simulation_data) - self.simulation = simulation - self.simulation_data = simulation.simulation_data - self.name = modelname - self.name_file = None - self._version = version - self.model_type = model_type - self.type = "Model" - - if model_nam_file is None: - model_nam_file = f"{modelname}.nam" - - if add_to_simulation: - self.structure = simulation.register_model( - self, model_type, modelname, model_nam_file - ) - else: - self.structure = structure - self.set_model_relative_path(model_rel_path) - self.exe_name = exe_name - self.dimensions = modeldimensions.ModelDimensions( - self.name, self.simulation_data - ) - self.simulation_data.model_dimensions[modelname] = self.dimensions - self._ftype_num_dict = {} - self._package_paths = {} - self._verbose = verbose - - if model_nam_file is None: - self.model_nam_file = f"{modelname}.nam" - else: - self.model_nam_file = model_nam_file - - # check for spatial reference info in kwargs - xll = kwargs.pop("xll", None) - yll = kwargs.pop("yll", None) - self._xul = kwargs.pop("xul", None) - self._yul = kwargs.pop("yul", None) - rotation = kwargs.pop("rotation", 0.0) - crs = kwargs.pop("crs", None) - # build model grid object - self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation) - - self.start_datetime = None - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = ( - f'Extraneous kwargs "{kwargs_str}" provided to MFModel.' - ) - raise FlopyException(excpt_str) - - # build model name file - # create name file based on model type - support different model types - package_obj = PackageContainer.package_factory("nam", model_type[0:3]) - if not package_obj: - excpt_str = ( - f"Name file could not be found for model{model_type[0:3]}." 
- ) - raise FlopyException(excpt_str) - - self.name_file = package_obj( - self, - filename=self.model_nam_file, - pname=self.name, - _internal_package=True, - ) - - def __init_subclass__(cls): - """Register model type""" - super().__init_subclass__() - PackageContainer.modflow_models.append(cls) - PackageContainer.models_by_type[cls.model_type] = cls - - def __getattr__(self, item): - """ - __getattr__ - used to allow for getting packages as if they are - attributes - - Parameters - ---------- - item : str - 3 character package name (case insensitive) - - - Returns - ------- - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - """ - if item == "name_file" or not hasattr(self, "name_file"): - raise AttributeError(item) - - package = self.get_package(item) - if package is not None: - return package - raise AttributeError(item) - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.name, - package="", - ) - return - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - file_mgr = self.simulation_data.mfpath - data_str = ( - "name = {}\nmodel_type = {}\nversion = {}\nmodel_" - "relative_path = {}" - "\n\n".format( - self.name, - self.model_type, - self.version, - file_mgr.model_relative_path[self.name], - ) - ) - - for package in self.packagelist: - pk_str = package._get_data_str(formal, False) - if formal: - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - else: - pk_str = package._get_data_str(formal, False) - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - return data_str - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_dict(self): - """Returns a copy of the package name dictionary. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. 
- """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - @property - def nper(self): - """Number of stress periods. - - Returns - ------- - nper : int - Number of stress periods in the simulation. - - """ - try: - return self.simulation.tdis.nper.array - except AttributeError: - return None - - @property - def modeltime(self): - """Model time discretization information. - - Returns - ------- - modeltime : ModelTime - FloPy object containing time discretization information for the - simulation. - - """ - tdis = self.simulation.get_package("tdis", type_only=True) - period_data = tdis.perioddata.get_data() - - # build steady state data - sto = self.get_package("sto", type_only=True) - if sto is None: - steady = np.full((len(period_data["perlen"])), True, dtype=bool) - else: - steady = np.full((len(period_data["perlen"])), False, dtype=bool) - ss_periods = sto.steady_state.get_active_key_dict() - for period, val in ss_periods.items(): - if val: - ss_periods[period] = sto.steady_state.get_data(period) - tr_periods = sto.transient.get_active_key_dict() - for period, val in tr_periods.items(): - if val: - tr_periods[period] = sto.transient.get_data(period) - if ss_periods: - last_ss_value = False - # loop through steady state array - for index, value in enumerate(steady): - # resolve if current index is steady state or transient - if index in ss_periods and ss_periods[index]: - last_ss_value = True - elif index in tr_periods and tr_periods[index]: - last_ss_value = False - if last_ss_value is True: - steady[index] = True - - # build model time - itmuni = tdis.time_units.get_data() - start_date_time = tdis.start_date_time.get_data() - - self._model_time = ModelTime( - perlen=period_data["perlen"], - nstp=period_data["nstp"], - tsmult=period_data["tsmult"], - time_units=itmuni, - start_datetime=start_date_time, - steady_state=steady - ) - return self._model_time - - @property - def modeldiscrit(self): - """Basic model spatial discretization information. This is used - internally prior to model spatial discretization information being - fully loaded. - - Returns - ------- - model grid : Grid subclass - FloPy object containing basic spatial discretization information - for the model. 
- - """ - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - return StructuredGrid( - nlay=dis.nlay.get_data(), - nrow=dis.nrow.get_data(), - ncol=dis.ncol.get_data(), - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - return VertexGrid( - ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data() - ) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - nodes = dis.nodes.get_data() - ncpl = np.array([nodes], dtype=int) - return UnstructuredGrid(ncpl=ncpl) - - @property - def modelgrid(self): - """Model spatial discretization information. - - Returns - ------- - model grid : Grid subclass - FloPy object containing spatial discretization information for the - model. - - """ - force_resync = False - if not self._mg_resync: - return self._modelgrid - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - if not hasattr(dis, "_init_complete"): - # disu package has not yet been fully initialized - return self._modelgrid - - # check to see if ncpl can be constructed from ihc array, - # otherwise set ncpl equal to [nodes] - ihc = dis.ihc.array - iac = dis.iac.array - ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) - if ncpl is None: - ncpl = np.array([dis.nodes.get_data()], dtype=int) - cell2d = dis.cell2d.array - idomain = dis.idomain.array - if idomain is None: - idomain = np.ones(dis.nodes.array, dtype=int) - if cell2d is None: - if ( - 
self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: cell2d information missing. Functionality of " - "the UnstructuredGrid will be limited." - ) - - vertices = dis.vertices.array - if vertices is None: - if ( - self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: vertices information missing. Functionality " - "of the UnstructuredGrid will be limited." - ) - vertices = None - else: - vertices = np.array(vertices) - - self._modelgrid = UnstructuredGrid( - vertices=vertices, - cell2d=cell2d, - top=dis.top.array, - botm=dis.bot.array, - idomain=idomain, - lenuni=dis.length_units.array, - ncpl=ncpl, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - iac=dis.iac.array, - ja=dis.ja.array, - ) - elif self.get_grid_type() == DiscretizationType.DISV1D: - dis = self.get_package("disv1d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell1d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DIS2D: - dis = self.get_package("dis2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV2D: - dis = self.get_package("disv2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain 
is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - return self._modelgrid - - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - - # resolve offsets - if xorig is None: - xorig = self._modelgrid.xoffset - if xorig is None: - if self._xul is not None: - xorig = self._modelgrid._xul_to_xll(self._xul) - else: - xorig = 0.0 - if yorig is None: - yorig = self._modelgrid.yoffset - if yorig is None: - if self._yul is not None: - yorig = self._modelgrid._yul_to_yll(self._yul) - else: - yorig = 0.0 - if angrot is None: - angrot = self._modelgrid.angrot - self._modelgrid.set_coord_info( - xorig, - yorig, - angrot, - self._modelgrid.crs, - ) - self._mg_resync = not self._modelgrid.is_complete or force_resync - return self._modelgrid - - @property - def packagelist(self): - """List of model packages.""" - return self._package_container.packagelist - - @property - def namefile(self): - """Model namefile object.""" - return self.model_nam_file - - @property - def model_ws(self): - """Model file path.""" - file_mgr = self.simulation_data.mfpath - return file_mgr.get_model_path(self.name) - - @property - def exename(self): - """MODFLOW executable name""" - return self.exe_name - - @property - def version(self): - """Version of MODFLOW""" - return self._version - - @property - def solver_tols(self): - """Returns the solver inner hclose and rclose values. - - Returns - ------- - inner_hclose, rclose : float, float - - """ - ims = self.get_ims_package() - if ims is not None: - rclose = ims.rcloserecord.get_data() - if rclose is not None: - rclose = rclose[0][0] - return ims.inner_hclose.get_data(), rclose - return None - - @property - def laytyp(self): - """Layering type""" - try: - return self.npf.icelltype.array - except AttributeError: - return None - - @property - def hdry(self): - """Dry cell value""" - return -1e30 - - @property - def hnoflo(self): - """No-flow cell value""" - return 1e30 - - @property - def laycbd(self): - """Quasi-3D confining bed. Not supported in MODFLOW-6. - - Returns - ------- - None : None - - """ - return None - - @property - def output(self): - budgetkey = None - if self.model_type == "gwt6": - budgetkey = "MASS BUDGET FOR ENTIRE MODEL" - try: - return MF6Output(self.oc, budgetkey=budgetkey) - except AttributeError: - return MF6Output(self, budgetkey=budgetkey) - - def export(self, f, **kwargs): - """Method to export a model to a shapefile or netcdf file - - Parameters - ---------- - f : str - File name (".nc" for netcdf or ".shp" for shapefile) - or dictionary of .... 
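
The export method defined here delegates to flopy.export.utils.model_export (see the body that follows). A minimal sketch, with assumed file names, of the two formats named in the docstring:

import flopy

sim = flopy.mf6.MFSimulation.load(sim_ws=".")
gwf = sim.get_model()

gwf.export("model.shp")  # shapefile of gridded model data
gwf.export("model.nc")   # netcdf export; needs the optional netCDF4 dependency
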
- **kwargs : keyword arguments - modelgrid: flopy.discretization.Grid - User supplied modelgrid object which will supersede the built - in modelgrid object - if fmt is set to 'vtk', parameters of Vtk initializer - - """ - from ..export import utils - - return utils.model_export(f, self, **kwargs) - - @property - def verbose(self): - """Verbose setting for model operations (True/False)""" - return self._verbose - - @verbose.setter - def verbose(self, verbose): - """Verbose setting for model operations (True/False)""" - self._verbose = verbose - - def check(self, f=None, verbose=True, level=1): - """ - Check model data for common errors. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - success : bool - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.check() - """ - - # check instance for model-level check - chk = mf6check(self, f=f, verbose=verbose, level=level) - - return self._check(chk, level) - - @staticmethod - def load_base( - cls_child, - simulation, - structure, - modelname="NewModel", - model_nam_file="modflowtest.nam", - mtype="gwf", - version="mf6", - exe_name: Union[str, os.PathLike] = "mf6", - strict=True, - model_rel_path=os.curdir, - load_only=None, - ): - """ - Class method that loads an existing model. - - Parameters - ---------- - simulation : MFSimulation - simulation object that this model is a part of - simulation_data : MFSimulationData - simulation data object - structure : MFModelStructure - structure of this type of model - model_name : str - name of the model - model_nam_file : str - relative path to the model name file from model working folder - version : str - version of modflow - exe_name : str or PathLike - model executable name or path - strict : bool - strict mode when loading files - model_rel_path : str - relative path of model folder to simulation folder - load_only : list - list of package abbreviations or package names corresponding to - packages that flopy will load. default is None, which loads all - packages. the discretization packages will load regardless of this - setting. subpackages, like time series and observations, will also - load regardless of this setting. 
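
The load_only filter described above can be exercised as in this sketch (workspace and package list are assumptions; it mirrors the example list that follows):

import flopy

sim = flopy.mf6.MFSimulation.load(
    sim_ws="model_ws",
    load_only=["ic", "npf", "oc"],  # dis/disv/disu load regardless
)
gwf = sim.get_model()
print(gwf.get_package("npf"))  # loaded
print(gwf.get_package("sto"))  # None unless listed in load_only
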
- example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1'] - - Returns - ------- - model : MFModel - - Examples - -------- - """ - instance = cls_child( - simulation, - modelname, - model_nam_file=model_nam_file, - version=version, - exe_name=exe_name, - add_to_simulation=False, - structure=structure, - model_rel_path=model_rel_path, - ) - - # build case consistent load_only dictionary for quick lookups - load_only = PackageContainer._load_only_dict(load_only) - - # load name file - instance.name_file.load(strict) - - # order packages - vnum = mfstructure.MFStructure().get_version_string() - # FIX: Transport - Priority packages maybe should not be hard coded - priority_packages = { - f"dis{vnum}": 1, - f"disv{vnum}": 1, - f"disu{vnum}": 1, - } - packages_ordered = [] - package_recarray = instance.simulation_data.mfdata[ - (modelname, "nam", "packages", "packages") - ] - if package_recarray.array is None: - return instance - - for item in package_recarray.get_data(): - if item[0] in priority_packages: - packages_ordered.insert(0, (item[0], item[1], item[2])) - else: - packages_ordered.append((item[0], item[1], item[2])) - - # load packages - sim_struct = mfstructure.MFStructure().sim_struct - instance._ftype_num_dict = {} - for ftype, fname, pname in packages_ordered: - ftype_orig = ftype - ftype = ftype[0:-1].lower() - if ( - ftype in structure.package_struct_objs - or ftype in sim_struct.utl_struct_objs - ): - if ( - load_only is not None - and not PackageContainer._in_pkg_list( - priority_packages, ftype_orig, pname - ) - and not PackageContainer._in_pkg_list( - load_only, ftype_orig, pname - ) - ): - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" skipping package {ftype}...") - continue - if model_rel_path and model_rel_path != ".": - # strip off model relative path from the file path - filemgr = simulation.simulation_data.mfpath - fname = filemgr.strip_model_relative_path(modelname, fname) - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" loading package {ftype}...") - # load package - instance.load_package(ftype, fname, pname, strict, None) - sim_data = simulation.simulation_data - if ftype == "dis" and not sim_data.max_columns_user_set: - # set column wrap to ncol - dis = instance.get_package("dis", type_only=True) - if dis is not None and hasattr(dis, "ncol"): - sim_data.max_columns_of_data = dis.ncol.get_data() - sim_data.max_columns_user_set = False - sim_data.max_columns_auto_set = True - # load referenced packages - if modelname in instance.simulation_data.referenced_files: - for ref_file in instance.simulation_data.referenced_files[ - modelname - ].values(): - if ( - ref_file.file_type in structure.package_struct_objs - or ref_file.file_type in sim_struct.utl_struct_objs - ) and not ref_file.loaded: - instance.load_package( - ref_file.file_type, - ref_file.file_name, - None, - strict, - ref_file.reference_path, - ) - ref_file.loaded = True - - # TODO: fix jagged lists where appropriate - - return instance - - def inspect_cells( - self, - cell_list, - stress_period=None, - output_file_path=None, - inspect_budget=True, - inspect_dependent_var=True, - ): - """ - Inspect model cells. Returns model data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data qnly return data from this stress period. 
If - not specified or None, all stress period data will be returned. - output_file_path: str - Path to output file that will contain the inspection results - inspect_budget: bool - Inspect budget file - inspect_dependent_var: bool - Inspect head file - Returns - ------- - output : dict - Dictionary containing inspection results - - Examples - -------- - - >>> import flopy - >>> sim = flopy.mf6.MFSimulationBase.load("name", "mf6", "mf6", ".") - >>> model = sim.get_model() - >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)] - >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv") - >>> model.inspect_cells(inspect_list, output_file_path=out_file) - """ - # handle no cell case - if cell_list is None or len(cell_list) == 0: - return None - - output_by_package = {} - # loop through all packages - for pp in self.packagelist: - # call the package's "inspect_cells" method - package_output = pp.inspect_cells(cell_list, stress_period) - if len(package_output) > 0: - output_by_package[f"{pp.package_name} package"] = ( - package_output - ) - # get dependent variable - if inspect_dependent_var: - try: - if self.model_type == "gwf6": - heads = self.output.head() - name = "heads" - elif self.model_type == "gwt6": - heads = self.output.concentration() - name = "concentration" - else: - inspect_dependent_var = False - except Exception: - inspect_dependent_var = False - if inspect_dependent_var and heads is not None: - kstp_kper_lst = heads.get_kstpkper() - data_output = DataSearchOutput((name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if stress_period is not None and stress_period != kstp_kper[1]: - continue - head_array = np.array(heads.get_data(kstpkper=kstp_kper)) - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2: - head_array = head_array[0, :, :] - elif len(cell_list[0]) == 1: - head_array = head_array[0, 0, :] - # find data matches - self.match_array_cells( - cell_list, - head_array.shape, - head_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - output_by_package[f"{name} output"] = [data_output] - - # get model dimensions - model_shape = self.modelgrid.shape - - # get budgets - if inspect_budget: - try: - bud = self.output.budget() - except Exception: - inspect_budget = False - if inspect_budget and bud is not None: - kstp_kper_lst = bud.get_kstpkper() - rec_names = bud.get_unique_record_names() - budget_matches = [] - for rec_name in rec_names: - # clean up binary string name - string_name = str(rec_name)[3:-1].strip() - data_output = DataSearchOutput((string_name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if ( - stress_period is not None - and stress_period != kstp_kper[1] - ): - continue - budget_array = np.array( - bud.get_data( - kstpkper=kstp_kper, - text=rec_name, - full3D=True, - )[0] - ) - if len(budget_array.shape) == 4: - # get rid of 4th "time" dimension - budget_array = budget_array[0, :, :, :] - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3: - budget_array = budget_array[0, :, :] - elif ( - len(cell_list[0]) == 1 and len(budget_array.shape) >= 2 - ): - budget_array = budget_array[0, :] - # find data matches - if budget_array.shape != model_shape: - # no support yet for different shaped budgets like - # flow_ja_face - continue - - self.match_array_cells( - cell_list, - budget_array.shape, - budget_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - budget_matches.append(data_output) - if 
len(budget_matches) > 0: - output_by_package["budget output"] = budget_matches - - if len(output_by_package) > 0 and output_file_path is not None: - with open(output_file_path, "w") as fd: - # write document header - fd.write(f"Inspect cell results for model {self.name}\n") - output = [] - for cell in cell_list: - output.append(" ".join([str(i) for i in cell])) - output = ",".join(output) - fd.write(f"Model cells inspected,{output}\n\n") - - for package_name, matches in output_by_package.items(): - fd.write(f"Results from {package_name}\n") - for search_output in matches: - # write header line with data name - fd.write( - f",Results from " - f"{search_output.path_to_data[-1]}\n" - ) - # write data header - if search_output.transient: - if search_output.output: - fd.write(",stress_period,time_step") - else: - fd.write(",stress_period/key") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - fd.write(",cellid") - h_columns = ",".join(search_output.data_header) - fd.write(f",{h_columns}\n") - else: - fd.write(",cellid,data\n") - # write data found - for index, data_entry in enumerate( - search_output.data_entries - ): - if search_output.transient: - sp = search_output.data_entry_stress_period[ - index - ] - if search_output.output: - fd.write(f",{sp[1]},{sp[0]}") - else: - fd.write(f",{sp}") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - cells = search_output.data_entry_cellids[ - index - ] - output = " ".join([str(i) for i in cells]) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - else: - output = " ".join( - [ - str(i) - for i in search_output.data_entry_ids[ - index - ] - ] - ) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - fd.write("\n") - return output_by_package - - def match_array_cells( - self, cell_list, data_shape, array_data, key, data_output - ): - # loop through list of cells we are searching for - for cell in cell_list: - if len(data_shape) == 3 or data_shape[0] == "nodes": - # data is by cell - if array_data.ndim == 3 and len(cell) == 3: - data_output.data_entries.append( - array_data[cell[0], cell[1], cell[2]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append( - array_data[cell[0], cell[1]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: CellID "{}" not same ' - "number of dimensions as data " - "{}.".format(cell, data_output.path_to_data) - ) - print(warning_str) - elif len(data_shape) == 2: - # get data based on ncpl/lay - if array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append( - array_data[cell[0], cell[1]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif len(data_shape) == 1: - # get data based on nodes - if len(cell) == 1 and array_data.ndim == 1: - 
data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - - @staticmethod - def _format_data_entry(data_entry): - output = "" - if iterable(data_entry, True): - for item in data_entry: - if isinstance(item, tuple): - formatted = " ".join([str(i) for i in item]) - output = f"{output},{formatted}" - else: - output = f"{output},{item}" - return f"{output}\n" - else: - return f",{data_entry}\n" - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """ - Writes out model's package files. - - Parameters - ---------- - ext_file_action : ExtFileAction - Defines what to do with external files when the simulation path has - changed. defaults to copy_relative_paths which copies only files - with relative paths, leaving files defined by absolute paths fixed. - - """ - - # write name file - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(" writing model name file...") - - self.name_file.write(ext_file_action=ext_file_action) - - if not self.simulation_data.max_columns_user_set: - grid_type = self.get_grid_type() - if grid_type == DiscretizationType.DIS: - self.simulation_data.max_columns_of_data = self.dis.ncol.get_data() - self.simulation_data.max_columns_user_set = False - self.simulation_data.max_columns_auto_set = True - - # write packages - for pp in self.packagelist: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" writing package {pp._get_pname()}...") - pp.write(ext_file_action=ext_file_action) - - def get_grid_type(self): - """ - Return the type of grid used by model 'model_name' in simulation - containing simulation data 'simulation_data'. - - Returns - ------- - grid type : DiscretizationType - """ - package_recarray = self.name_file.packages - structure = mfstructure.MFStructure() - if ( - package_recarray.search_data( - f"dis{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DIS - elif ( - package_recarray.search_data( - f"disv{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV - elif ( - package_recarray.search_data( - f"disu{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISU - elif ( - package_recarray.search_data( - f"disv1d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV1D - elif ( - package_recarray.search_data( - f"dis2d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DIS2D - elif ( - package_recarray.search_data( - f"disv2d{structure.get_version_string()}", 0 - ) - is not None - ): - return DiscretizationType.DISV2D - - return DiscretizationType.UNDEFINED - - def get_ims_package(self): - """Get the IMS package associated with this model. - - Returns - ------- - IMS package : ModflowIms - """ - solution_group = self.simulation.name_file.solutiongroup.get_data(0) - for record in solution_group: - for name in record.dtype.names: - if name == "slntype" or name == "slnfname": - continue - if record[name] == self.name: - return self.simulation.get_solution_package( - record.slnfname - ) - return None - - def get_steadystate_list(self): - """Returns a list of stress periods that are steady state. 
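
A short sketch combining the grid-type query above with the steady-state list described here (simulation loading assumed as in the earlier sketches):

import flopy
from flopy.mf6.utils.mfenums import DiscretizationType

sim = flopy.mf6.MFSimulation.load(sim_ws=".")
gwf = sim.get_model()

if gwf.get_grid_type() == DiscretizationType.DIS:
    print("structured (DIS) grid")
print(gwf.get_steadystate_list())  # e.g. [True, False, False]
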
- - Returns - ------- - steady state list : list - - """ - ss_list = [] - tdis = self.simulation.get_package("tdis") - period_data = tdis.perioddata.get_data() - index = 0 - pd_len = len(period_data) - while index < pd_len: - ss_list.append(True) - index += 1 - - storage = self.get_package("sto", type_only=True) - if storage is not None: - tr_keys = storage.transient.get_keys(True) - ss_keys = storage.steady_state.get_keys(True) - for key in tr_keys: - ss_list[key] = False - for ss_list_key in range(key + 1, len(ss_list)): - for ss_key in ss_keys: - if ss_key == ss_list_key: - break - ss_list[key] = False - return ss_list - - def is_valid(self): - """ - Checks the validity of the model and all of its packages - - Returns - ------- - valid : bool - - """ - - # valid name file - if not self.name_file.is_valid(): - return False - - # valid packages - for pp in self.packagelist: - if not pp.is_valid(): - return False - - # required packages exist - for package_struct in self.structure.package_struct_objs.values(): - if ( - not package_struct.optional - and package_struct.file_type - not in self._package_container.package_type_dict - ): - return False - - return True - - def set_model_relative_path(self, model_ws): - """ - Sets the file path to the model folder relative to the simulation - folder and updates all model file paths, placing them in the model - folder. - - Parameters - ---------- - model_ws : str - Model working folder relative to simulation working folder - - """ - # set all data internal - self.set_all_data_internal(False) - - # update path in the file manager - file_mgr = self.simulation_data.mfpath - file_mgr.set_last_accessed_model_path() - path = model_ws - file_mgr.model_relative_path[self.name] = path - - if ( - model_ws - and model_ws != "." 
- and self.simulation.name_file is not None - ): - model_folder_path = file_mgr.get_model_path(self.name) - if not os.path.exists(model_folder_path): - # make new model folder - os.makedirs(model_folder_path) - # update model name file location in simulation name file - models = self.simulation.name_file.models - models_data = models.get_data() - for index, entry in enumerate(models_data): - old_model_file_name = os.path.split(entry[1])[1] - old_model_base_name = os.path.splitext(old_model_file_name)[0] - if ( - old_model_base_name.lower() == self.name.lower() - or self.name == entry[2] - ): - models_data[index][1] = os.path.join( - path, old_model_file_name - ) - break - models.set_data(models_data) - - if self.name_file is not None: - # update listing file location in model name file - list_file = self.name_file.list.get_data() - if list_file: - path, list_file_name = os.path.split(list_file) - try: - self.name_file.list.set_data( - os.path.join(path, list_file_name) - ) - except MFDataException as mfde: - message = ( - "Error occurred while setting relative " - 'path "{}" in model ' - '"{}".'.format( - os.path.join(path, list_file_name), self.name - ) - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - # update package file locations in model name file - packages = self.name_file.packages - packages_data = packages.get_data() - if packages_data is not None: - for index, entry in enumerate(packages_data): - # get package object associated with entry - package = None - if len(entry) >= 3: - package = self.get_package(entry[2]) - if package is None: - package = self.get_package(entry[0]) - if package is not None: - # combine model relative path with package path - packages_data[index][1] = os.path.join( - path, package.filename - ) - else: - # package not found, create path based on - # information in name file - old_package_name = os.path.split(entry[1])[-1] - packages_data[index][1] = os.path.join( - path, old_package_name - ) - packages.set_data(packages_data) - # update files referenced from within packages - for package in self.packagelist: - package.set_model_relative_path(model_ws) - - def _remove_package_from_dictionaries(self, package): - # remove package from local dictionaries and lists - if package.path in self._package_paths: - del self._package_paths[package.path] - self._package_container.remove_package(package) - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. - type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def remove_package(self, package_name): - """ - Removes package and all child packages from the model. - `package_name` can be the package's name, type, or package object to - be removed from the model. - - Parameters - ---------- - package_name : str - Package name, package type, or package object to be removed from - the model. 
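
A sketch of package lookup and removal (the package names are assumptions):

import flopy

sim = flopy.mf6.MFSimulation.load(sim_ws=".")
gwf = sim.get_model()

riv = gwf.get_package("riv")                  # by name or type; may return a list
npf = gwf.get_package("npf", type_only=True)  # restrict the match to package type
gwf.remove_package("riv")                     # child packages are removed as well
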
- - """ - if isinstance(package_name, MFPackage): - packages = [package_name] - else: - packages = self.get_package(package_name) - if not isinstance(packages, list) and packages is not None: - packages = [packages] - if packages is None: - return - for package in packages: - if package.model_or_sim.name != self.name: - except_text = ( - "Package can not be removed from model " - "{self.model_name} since it is not part of it." - ) - raise mfstructure.FlopyException(except_text) - - self._remove_package_from_dictionaries(package) - - try: - # remove package from name file - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while reading package names " - "from name file in model " - f'"{self.name}"' - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - try: - new_rec_array = None - for item in package_data: - filename = os.path.basename(item[1]) - if filename != package.filename: - if new_rec_array is None: - new_rec_array = np.rec.array( - [item.tolist()], package_data.dtype - ) - else: - new_rec_array = np.hstack((item, new_rec_array)) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - "building package recarray", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - None, - self.simulation_data.debug, - ) - try: - self.name_file.packages.set_data(new_rec_array) - except MFDataException as mfde: - message = ( - "Error occurred while setting package names " - f'from name file in model "{self.name}". Package name ' - f"data:\n{new_rec_array}" - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - - # build list of child packages - child_package_list = [] - for pkg in self.packagelist: - if ( - pkg.parent_file is not None - and pkg.parent_file.path == package.path - ): - child_package_list.append(pkg) - # remove child packages - for child_package in child_package_list: - self._remove_package_from_dictionaries(child_package) - - def update_package_filename(self, package, new_name): - """ - Updates the filename for a package. For internal flopy use only. - - Parameters - ---------- - package : MFPackage - Package object - new_name : str - New package name - """ - try: - # get namefile package data - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while updating package names " - "from name file in model " - f'"{self.name}".' 
-            )
-            raise MFDataException(
-                mfdata_except=mfde,
-                model=self.model_name,
-                package=self.name_file._get_pname(),
-                message=message,
-            )
-        try:
-            file_mgr = self.simulation_data.mfpath
-            model_rel_path = file_mgr.model_relative_path[self.name]
-            # update namefile package data with new name
-            new_rec_array = None
-            old_leaf = os.path.split(package.filename)[1]
-            for item in package_data:
-                leaf = os.path.split(item[1])[1]
-                if leaf == old_leaf:
-                    item[1] = os.path.join(model_rel_path, new_name)
-
-                if new_rec_array is None:
-                    new_rec_array = np.rec.array(
-                        [item.tolist()], package_data.dtype
-                    )
-                else:
-                    new_rec_array = np.hstack((item, new_rec_array))
-        except:
-            type_, value_, traceback_ = sys.exc_info()
-            raise MFDataException(
-                self.structure.get_model(),
-                self.structure.get_package(),
-                self._path,
-                "updating package filename",
-                self.structure.name,
-                inspect.stack()[0][3],
-                type_,
-                value_,
-                traceback_,
-                None,
-                self.simulation_data.debug,
-            )
-        try:
-            self.name_file.packages.set_data(new_rec_array)
-        except MFDataException as mfde:
-            message = (
-                "Error occurred while updating package names "
-                f'from name file in model "{self.name}". Package name '
-                f"data:\n{new_rec_array}"
-            )
-            raise MFDataException(
-                mfdata_except=mfde,
-                model=self.model_name,
-                package=self.name_file._get_pname(),
-                message=message,
-            )
-
-    def rename_all_packages(self, name):
-        """Renames all package files in the model.
-
-        Parameters
-        ----------
-        name : str
-            Prefix of package names. Package files will be named
-            <name>.<package ext>.
-
-        """
-        nam_filename = f"{name}.nam"
-        self.simulation.rename_model_namefile(self, nam_filename)
-        self.name_file.filename = nam_filename
-        self.model_nam_file = nam_filename
-        package_type_count = {}
-        for package in self.packagelist:
-            if package.package_type not in package_type_count:
-                base_filename, leaf = os.path.split(package.filename)
-                lleaf = leaf.split(".")
-                if len(lleaf) > 1:
-                    # keep existing extension
-                    ext = lleaf[-1]
-                else:
-                    # no extension found, create a new one
-                    ext = package.package_type
-                new_fileleaf = f"{name}.{ext}"
-                if base_filename != "":
-                    package.filename = os.path.join(
-                        base_filename, new_fileleaf
-                    )
-                else:
-                    package.filename = new_fileleaf
-                package_type_count[package.package_type] = 1
-            else:
-                package_type_count[package.package_type] += 1
-                package.filename = "{}_{}.{}".format(
-                    name,
-                    package_type_count[package.package_type],
-                    package.package_type,
-                )
-
-    def set_all_data_external(
-        self,
-        check_data=True,
-        external_data_folder=None,
-        base_name=None,
-        binary=False,
-    ):
-        """Sets the model's list and array data to be stored externally.
-
-        Warning
-        -------
-        The MF6 check mechanism is deprecated pending reimplementation
-        in a future release. While the checks API will remain in place
-        through 3.x, it may be unstable, and will likely change in 4.x.
-
-        Parameters
-        ----------
-        check_data : bool
-            Determines if data error checking is enabled during this
-            process.
-        external_data_folder
-            Folder, relative to the simulation path or model relative
-            path, where external data will be stored
-        base_name : str
-            Base file name prefix for all files
-        binary : bool
-            Whether file will be stored as binary
-
-        """
-        for package in self.packagelist:
-            package.set_all_data_external(
-                check_data,
-                external_data_folder,
-                base_name,
-                binary,
-            )
-
-    def set_all_data_internal(self, check_data=True):
-        """Sets the model's list and array data to be stored internally.
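-
-        Any list or array data currently stored in external files is
-        read in and will be written inline in the package files the
-        next time the simulation is written.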
- - Parameters - ---------- - check_data : bool - Determines if data error checking is enabled during this - process. - - """ - for package in self.packagelist: - package.set_all_data_internal(check_data) - - def register_package( - self, - package, - add_to_package_list=True, - set_package_name=True, - set_package_filename=True, - ): - """ - Registers a package with the model. This method is used internally - by FloPy and is not intended for use by the end user. - - Parameters - ---------- - package : MFPackage - Package to register - add_to_package_list : bool - Add package to lookup list - set_package_name : bool - Produce a package name for this package - set_package_filename : bool - Produce a filename for this package - - Returns - ------- - path, package structure : tuple, MFPackageStructure - - """ - package.container_type = [PackageContainerType.model] - if package.parent_file is not None: - path = package.parent_file.path + (package.package_type,) - else: - path = (self.name, package.package_type) - package_struct = self.structure.get_package_struct( - package.package_type - ) - if add_to_package_list and path in self._package_paths: - if ( - package_struct is not None - and not package_struct.multi_package_support - and not isinstance(package.parent_file, MFPackage) - ): - # package of this type already exists, replace it - self.remove_package(package.package_type) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with type {} already exists. " - "Replacing existing package" - ".".format(package.package_type) - ) - elif ( - not set_package_name - and package.package_name - in self._package_container.package_name_dict - ): - # package of this type with this name already - # exists, replace it - self.remove_package( - self._package_container.package_name_dict[ - package.package_name - ] - ) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with name {} already exists. " - "Replacing existing package" - ".".format(package.package_name) - ) - - # make sure path is unique - if path in self._package_paths: - path_iter = datautil.PathIter(path) - for new_path in path_iter: - if new_path not in self._package_paths: - path = new_path - break - self._package_paths[path] = 1 - - if package.package_type.lower() == "nam": - if not package.internal_package: - excpt_str = ( - "Unable to register nam file. Do not create your own nam " - "files. Nam files are automatically created and managed " - "for you by FloPy." 
- ) - print(excpt_str) - raise FlopyException(excpt_str) - - return path, self.structure.name_file_struct_obj - - package_extension = package.package_type - if set_package_name: - # produce a default package name - if ( - package_struct is not None - and package_struct.multi_package_support - ): - # check for other registered packages of this type - name_iter = datautil.NameIter(package.package_type, False) - for package_name in name_iter: - if ( - package_name - not in self._package_container.package_name_dict - ): - package.package_name = package_name - suffix = package_name.split("_") - if ( - len(suffix) > 1 - and datautil.DatumUtil.is_int(suffix[-1]) - and suffix[-1] != "0" - ): - # update file extension to make unique - package_extension = ( - f"{package_extension}_{suffix[-1]}" - ) - break - else: - package.package_name = package.package_type - - if set_package_filename: - # filename uses model base name - package._filename = f"{self.name}.{package.package_type}" - if ( - package._filename - in self._package_container.package_filename_dict - ): - # auto generate a unique file name and register it - file_name = MFFileMgmt.unique_file_name( - package._filename, - self._package_container.package_filename_dict, - ) - package._filename = file_name - - if add_to_package_list: - self._package_container.add_package(package) - - # add obs file to name file if it does not have a parent - if package.package_type in self.structure.package_struct_objs or ( - package.package_type == "obs" and package.parent_file is None - ): - # update model name file - pkg_type = package.package_type.upper() - if ( - package.package_type != "obs" and - self.structure.package_struct_objs[ - package.package_type - ].read_as_arrays - ): - pkg_type = pkg_type[0:-1] - # Model Assumption - assuming all name files have a package - # recarray - file_mgr = self.simulation_data.mfpath - model_rel_path = file_mgr.model_relative_path[self.name] - if model_rel_path != ".": - package_rel_path = os.path.join( - model_rel_path, package.filename - ) - else: - package_rel_path = package.filename - self.name_file.packages.update_record( - [ - f"{pkg_type}6", - package_rel_path, - package.package_name, - ], - 0, - ) - if package_struct is not None: - return (path, package_struct) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Unable to register unsupported file type {} " - "for model {}.".format(package.package_type, self.name) - ) - return None, None - - def load_package( - self, - ftype, - fname, - pname, - strict, - ref_path, - dict_package_name=None, - parent_package: Optional[MFPackage] = None, - ): - """ - Loads a package from a file. This method is used internally by FloPy - and is not intended for the end user. - - Parameters - ---------- - ftype : str - the file type - fname : str - the name of the file containing the package input - pname : str - the user-defined name for the package - strict : bool - strict mode when loading the file - ref_path : str - path to the file. 
uses local path if set to None - dict_package_name : str - package name for dictionary lookup - parent_package : MFPackage - parent package - - Examples - -------- - """ - if ref_path is not None: - fname = os.path.join(ref_path, fname) - sim_struct = mfstructure.MFStructure().sim_struct - if ( - ftype in self.structure.package_struct_objs - and self.structure.package_struct_objs[ftype].multi_package_support - ) or ( - ftype in sim_struct.utl_struct_objs - and sim_struct.utl_struct_objs[ftype].multi_package_support - ): - # resolve dictionary name for package - if dict_package_name is not None: - if parent_package is not None: - dict_package_name = f"{parent_package.path[-1]}_{ftype}" - else: - # use dict_package_name as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[dict_package_name] += 1 - else: - self._ftype_num_dict[dict_package_name] = 0 - dict_package_name = "{}_{}".format( - dict_package_name, - self._ftype_num_dict[dict_package_name], - ) - else: - # use ftype as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[ftype] += 1 - else: - self._ftype_num_dict[ftype] = 1 - if pname is not None: - dict_package_name = pname - else: - dict_package_name = ( - f"{ftype}-{self._ftype_num_dict[ftype]}" - ) - else: - dict_package_name = ftype - - # clean up model type text - model_type = self.structure.model_type - while datautil.DatumUtil.is_int(model_type[-1]): - model_type = model_type[0:-1] - - # create package - package_obj = PackageContainer.package_factory(ftype, model_type) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - try: - package.load(strict) - except ReadAsArraysException: - # create ReadAsArrays package and load it instead - package_obj = PackageContainer.package_factory( - f"{ftype}a", model_type - ) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - package.load(strict) - - # register child package with the model - self._package_container.add_package(package) - if parent_package is not None: - # register child package with the parent package - parent_package.add_package(package) - - return package - - def plot(self, SelPackList=None, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Args: - model: Flopy model instance - SelPackList: (list) list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns: - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
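-
-        Example:
-            A minimal sketch (assumes ``gwf`` is a loaded model with
-            DIS and NPF packages and that matplotlib is installed):
-
-            >>> axes = gwf.plot(SelPackList=["DIS", "NPF"], mflay=0)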
- """ - from ..plot.plotutil import PlotUtilities - - axes = PlotUtilities._plot_model_helper( - self, SelPackList=SelPackList, **kwargs - ) - - return axes - - @staticmethod - def _resolve_idomain(idomain, botm): - if idomain is None: - if botm is None: - return idomain - else: - return np.ones_like(botm) - return idomain - - @staticmethod - def netcdf_attrs(mname, mtype, grid_type, mesh=None): - """Return dictionary of dataset (model) scoped attributes - Parameters - ---------- - mname : str - model name - mtype : str - model type - grid_type: - DiscretizationType - mesh : str - mesh type if dataset is ugrid compliant - """ - attrs = { - "modflow_grid": "", - "modflow_model": "", - } - if grid_type == DiscretizationType.DIS: - attrs["modflow_grid"] = "STRUCTURED" - elif grid_type == DiscretizationType.DISV: - attrs["modflow_grid"] = "VERTEX" - - attrs["modflow_model"] = ( - f"{mname.upper()}: MODFLOW 6 {mtype.upper()} model" - ) - - # supported => LAYERED - if mesh: - attrs["mesh"] = mesh - - return attrs - - def netcdf_info(self, mesh=None): - """Return dictionary of dataset (model) scoped attributes - Parameters - ---------- - mesh : str - mesh type if dataset is ugrid compliant - """ - attrs = MFModel.netcdf_attrs( - self.name, - self.model_type, - self.get_grid_type(), - mesh - ) - - res_d = {} - res_d['attrs'] = attrs - return res_d diff --git a/flopy/mf6/tmp/ruff/2/mfpackage.py b/flopy/mf6/tmp/ruff/2/mfpackage.py deleted file mode 100644 index 90931d2b66..0000000000 --- a/flopy/mf6/tmp/ruff/2/mfpackage.py +++ /dev/null @@ -1,3720 +0,0 @@ -import copy -import datetime -import errno -import inspect -import os -import sys -import warnings - -import numpy as np - -from ..mbase import ModelInterface -from ..pakbase import PackageInterface -from ..utils import datautil -from ..utils.check import mf6check -from ..version import __version__ -from .coordinates import modeldimensions -from .data import ( - mfdata, - mfdataarray, - mfdatalist, - mfdataplist, - mfdatascalar, - mfstructure, -) -from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal -from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - MFInvalidTransientBlockHeaderException, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .utils.output_util import MF6Output - - -class MFBlockHeader: - """ - Represents the header of a block in a MF6 input file. This class is used - internally by FloPy and its direct use by a user of this library is not - recommend. - - Parameters - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - - Attributes - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - data_items : list - List of MFVariable of the variables contained in this block - - """ - - def __init__( - self, - name, - variable_strings, - comment, - simulation_data=None, - path=None, - block=None, - ): - self.name = name - self.variable_strings = variable_strings - self.block = block - if not ( - (simulation_data is None and path is None) - or (simulation_data is not None and path is not None) - ): - raise FlopyException( - "Block header must be initialized with both " - "simulation_data and path or with neither." 
- ) - if simulation_data is None: - self.comment = comment - self.simulation_data = None - self.path = path - self.comment_path = None - else: - self.connect_to_dict(simulation_data, path, comment) - # TODO: Get data_items from dictionary - self.data_items = [] - # build block comment paths - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - if isinstance(path, list): - path = tuple(path) - if path is not None: - self.blk_trailing_comment_path = path + ( - name, - "blk_trailing_comment", - ) - self.blk_post_comment_path = path + ( - name, - "blk_post_comment", - ) - if self.blk_trailing_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_trailing_comment_path] = ( - MFComment("", "", simulation_data, 0) - ) - if self.blk_post_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_post_comment_path] = MFComment( - "\n", "", simulation_data, 0 - ) - else: - self.blk_trailing_comment_path = ("blk_trailing_comment",) - self.blk_post_comment_path = ("blk_post_comment",) - - def __lt__(self, other): - transient_key = self.get_transient_key() - if transient_key is None: - return True - else: - other_key = other.get_transient_key() - if other_key is None: - return False - else: - return transient_key < other_key - - def build_header_variables( - self, - simulation_data, - block_header_structure, - block_path, - data, - dimensions, - ): - """Builds data objects to hold header variables.""" - self.data_items = [] - var_path = block_path + (block_header_structure[0].name,) - - # fix up data - fixed_data = [] - if ( - block_header_structure[0].data_item_structures[0].type - == DatumType.keyword - ): - data_item = block_header_structure[0].data_item_structures[0] - fixed_data.append(data_item.name) - if isinstance(data, tuple): - data = list(data) - if isinstance(data, list): - fixed_data = fixed_data + data - else: - fixed_data.append(data) - if len(fixed_data) > 0: - fixed_data = [tuple(fixed_data)] - # create data object - new_data = self.block.data_factory( - simulation_data, - None, - block_header_structure[0], - True, - var_path, - dimensions, - fixed_data, - ) - - self.add_data_item(new_data, data) - - def add_data_item(self, new_data, data): - """Adds data to the block.""" - self.data_items.append(new_data) - while isinstance(data, list): - if len(data) > 0: - data = data[0] - else: - data = None - if not isinstance(data, tuple): - data = (data,) - self.blk_trailing_comment_path += data - self.blk_post_comment_path += data - - def is_same_header(self, block_header): - """Checks if `block_header` is the same header as this header.""" - if len(self.variable_strings) > 0: - if len(self.variable_strings) != len( - block_header.variable_strings - ): - return False - else: - for sitem, oitem in zip( - self.variable_strings, block_header.variable_strings - ): - if sitem != oitem: - return False - return True - elif ( - len(self.data_items) > 0 and len(block_header.variable_strings) > 0 - ): - typ_obj = ( - self.data_items[0].structure.data_item_structures[0].type_obj - ) - if typ_obj == int or typ_obj == float: - return bool( - self.variable_strings[0] - == block_header.variable_strings[0] - ) - else: - return True - elif len(self.data_items) == len(block_header.variable_strings): - return True - return False - - def get_comment(self): - """Get block header comment""" - if self.simulation_data is None: - return self.comment - else: - return self.simulation_data.mfdata[self.comment_path] - - 
def connect_to_dict(self, simulation_data, path, comment=None): - """Add comment to the simulation dictionary""" - self.simulation_data = simulation_data - self.path = path - self.comment_path = path + ("blk_hdr_comment",) - if comment is None: - simulation_data.mfdata[self.comment_path] = self.comment - else: - simulation_data.mfdata[self.comment_path] = comment - self.comment = None - - def write_header(self, fd): - """Writes block header to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block header to. - - """ - fd.write(f"BEGIN {self.name}") - if len(self.data_items) > 0: - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - one_based = ( - self.data_items[0].structure.type == DatumType.integer - ) - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - if len(self.data_items) > 1: - for data_item in self.data_items[1:]: - entry = data_item.get_file_entry(values_only=True) - fd.write(str(entry).rstrip()) - if self.get_comment().text: - fd.write(" ") - self.get_comment().write(fd) - fd.write("\n") - - def write_footer(self, fd): - """Writes block footer to file object `fd`. - - Parameters - ---------- - fd : file object - File object to write block footer to. - - """ - fd.write(f"END {self.name}") - if len(self.data_items) > 0: - one_based = self.data_items[0].structure.type == DatumType.integer - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - entry = self.data_items[0].get_file_entry( - values_only=True, one_based=one_based - ) - else: - entry = self.data_items[0].get_file_entry() - fd.write(str(entry.rstrip())) - fd.write("\n") - - def get_transient_key(self, data_path=None): - """Get transient key associated with this block header.""" - transient_key = None - for index in range(0, len(self.data_items)): - if self.data_items[index].structure.type != DatumType.keyword: - if data_path == self.data_items[index].path: - # avoid infinite recursion - return True - transient_key = self.data_items[index].get_data() - if isinstance(transient_key, np.recarray): - item_struct = self.data_items[index].structure - key_index = item_struct.first_non_keyword_index() - if not ( - key_index is not None - and len(transient_key[0]) > key_index - ): - if key_index is None: - raise FlopyException( - "Block header index could " - "not be determined." - ) - else: - raise FlopyException( - 'Block header index "{}" ' - 'must be less than "{}"' - ".".format(key_index, len(transient_key[0])) - ) - transient_key = transient_key[0][key_index] - break - return transient_key - - -class MFBlock: - """ - Represents a block in a MF6 input file. This class is used internally - by FloPy and use by users of the FloPy library is not recommended. 
- - Parameters - ---------- - simulation_data : MFSimulationData - Data specific to this simulation - dimensions : MFDimensions - Describes model dimensions including model grid and simulation time - structure : MFVariableStructure - Structure describing block - path : tuple - Unique path to block - - Attributes - ---------- - block_headers : MFBlockHeader - Block header text (BEGIN/END), header variables, comments in the - header - structure : MFBlockStructure - Structure describing block - path : tuple - Unique path to block - datasets : OrderDict - Dictionary of dataset objects with keys that are the name of the - dataset - datasets_keyword : dict - Dictionary of dataset objects with keys that are key words to identify - start of dataset - enabled : bool - If block is being used in the simulation - - """ - - def __init__( - self, - simulation_data, - dimensions, - structure, - path, - model_or_sim, - container_package, - ): - self._simulation_data = simulation_data - self._dimensions = dimensions - self._model_or_sim = model_or_sim - self._container_package = container_package - self.block_headers = [ - MFBlockHeader( - structure.name, - [], - MFComment("", path, simulation_data, 0), - simulation_data, - path, - self, - ) - ] - self.structure = structure - self.path = path - self.datasets = {} - self.datasets_keyword = {} - # initially disable if optional - self.enabled = structure.number_non_optional_data() > 0 - self.loaded = False - self.external_file_name = None - self._structure_init() - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - data_str = "" - for dataset in self.datasets.values(): - if formal: - ds_repr = repr(dataset) - if len(ds_repr.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!r}\n" - ) - else: - ds_str = str(dataset) - if len(ds_str.strip()) > 0: - data_str = ( - f"{data_str}{dataset.structure.name}\n{dataset!s}\n" - ) - return data_str - - # return an MFScalar, MFList, or MFArray - def data_factory( - self, - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - data=None, - package=None, - ): - """Creates the appropriate data child object derived from MFData.""" - data_type = structure.get_datatype() - # examine the data structure and determine the data type - if ( - data_type == mfstructure.DataType.scalar_keyword - or data_type == mfstructure.DataType.scalar - ): - return mfdatascalar.MFScalar( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - ) - elif ( - data_type == mfstructure.DataType.scalar_keyword_transient - or data_type == mfstructure.DataType.scalar_transient - ): - trans_scalar = mfdatascalar.MFScalarTransient( - sim_data, model_or_sim, structure, enable, path, dimensions - ) - if data is not None: - trans_scalar.set_data(data, key=0) - return trans_scalar - elif data_type == mfstructure.DataType.array: - return mfdataarray.MFArray( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - self, - ) - elif data_type == mfstructure.DataType.array_transient: - trans_array = mfdataarray.MFTransientArray( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - self, - ) - if data is not None: - trans_array.set_data(data, key=0) - return trans_array - elif data_type == mfstructure.DataType.list: - if ( - structure.basic_item - and self._container_package.package_type.lower() != "nam" - and self._simulation_data.use_pandas - ): - 
return mfdataplist.MFPandasList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - else: - return mfdatalist.MFList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - elif data_type == mfstructure.DataType.list_transient: - if structure.basic_item and self._simulation_data.use_pandas: - trans_list = mfdataplist.MFPandasTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - else: - trans_list = mfdatalist.MFTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - trans_list.set_data(data, key=0, autofill=True) - return trans_list - elif data_type == mfstructure.DataType.list_multiple: - mult_list = mfdatalist.MFMultipleList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - mult_list.set_data(data, key=0, autofill=True) - return mult_list - - def _structure_init(self): - # load datasets keywords into dictionary - for dataset_struct in self.structure.data_structures.values(): - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - # load block header data items into dictionary - for dataset in self.structure.block_header_structure: - self._new_dataset(dataset.name, dataset, True, None) - - def set_model_relative_path(self, model_ws): - """Sets `model_ws` as the model path relative to the simulation's - path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. - """ - # update datasets - for key, dataset in self.datasets.items(): - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_name = os.path.split(file_line[0])[1] - file_line[0] = os.path.join(model_ws, old_file_name) - # update block headers - for block_header in self.block_headers: - for dataset in block_header.data_items: - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_path, old_file_name = os.path.split( - file_line[1] - ) - new_file_path = os.path.join( - model_ws, old_file_name - ) - # update transient keys of datasets within the - # block - for key, idataset in self.datasets.items(): - if isinstance(idataset, mfdata.MFTransient): - idataset.update_transient_key( - file_line[1], new_file_path - ) - file_line[1] = os.path.join( - model_ws, old_file_name - ) - - def add_dataset(self, dataset_struct, data, var_path): - """Add data to this block.""" - try: - self.datasets[var_path[-1]] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - var_path, - self._dimensions, - data, - 
self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - - self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] - dtype = dataset_struct.get_datatype() - if ( - dtype == mfstructure.DataType.list_transient - or dtype == mfstructure.DataType.list_multiple - or dtype == mfstructure.DataType.array_transient - ): - # build repeating block header(s) - if isinstance(data, dict): - # Add block headers for each dictionary key - for index in data: - if isinstance(index, tuple): - header_list = list(index) - else: - header_list = [index] - self._build_repeating_header(header_list) - elif isinstance(data, list): - # Add a single block header of value 0 - self._build_repeating_header([0]) - elif ( - dtype != mfstructure.DataType.list_multiple - and data is not None - ): - self._build_repeating_header([[0]]) - - return self.datasets[var_path[-1]] - - def _build_repeating_header(self, header_data): - if self.header_exists(header_data[0]): - return - if ( - len(self.block_headers[-1].data_items) == 1 - and self.block_headers[-1].data_items[0].get_data() is not None - ): - block_header_path = self.path + (len(self.block_headers) + 1,) - block_header = MFBlockHeader( - self.structure.name, - [], - MFComment("", self.path, self._simulation_data, 0), - self._simulation_data, - block_header_path, - self, - ) - self.block_headers.append(block_header) - else: - block_header_path = self.path + (len(self.block_headers),) - - struct = self.structure - last_header = self.block_headers[-1] - try: - last_header.build_header_variables( - self._simulation_data, - struct.block_header_structure, - block_header_path, - header_data, - self._dimensions, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while building" - " block header variables for block " - '"{}"'.format(last_header.name), - ) - - def _new_dataset( - self, key, dataset_struct, block_header=False, initial_val=None - ): - dataset_path = self.path + (key,) - if block_header: - if ( - dataset_struct.type == DatumType.integer - and initial_val is not None - and len(initial_val) >= 1 - and dataset_struct.get_record_size()[0] == 1 - ): - # stress periods are stored 0 based - initial_val = int(initial_val[0]) - 1 - if isinstance(initial_val, list): - initial_val_path = tuple(initial_val) - initial_val = [tuple(initial_val)] - else: - initial_val_path = initial_val - try: - new_data = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - self.block_headers[-1].add_data_item(new_data, initial_val_path) - - else: - try: - self.datasets[key] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - 
) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - - def is_empty(self): - """Returns true if this block is empty.""" - for key, dataset in self.datasets.items(): - try: - has_data = dataset.has_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while verifying" - ' data of dataset "{}" in block ' - '"{}"'.format(dataset.structure.name, self.structure.name), - ) - - if has_data is not None and has_data: - return False - return True - - def load(self, block_header, fd, strict=True): - """Loads block from file object. file object must be advanced to - beginning of block before calling. - - Parameters - ---------- - block_header : MFBlockHeader - Block header for block block being loaded. - fd : file - File descriptor of file being loaded - strict : bool - Enforce strict MODFLOW 6 file format. - """ - # verify number of header variables - if ( - len(block_header.variable_strings) - < self.structure.number_non_optional_block_header_data() - ): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" does not ' - "contain the correct number of " - "variables {}".format(block_header.name, self.path) - ) - print(warning_str) - return - - if self.loaded: - # verify header has not already been loaded - for bh_current in self.block_headers: - if bh_current.is_same_header(block_header): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" is ' - "not a unique block header " - "{}".format(block_header.name, self.path) - ) - print(warning_str) - return - - # init - self.enabled = True - if not self.loaded: - self.block_headers = [] - block_header.block = self - self.block_headers.append(block_header) - - # process any header variable - if len(self.structure.block_header_structure) > 0: - dataset = self.structure.block_header_structure[0] - self._new_dataset( - dataset.name, - dataset, - True, - self.block_headers[-1].variable_strings, - ) - - # handle special readasarrays case - if ( - self._container_package.structure.read_as_arrays - or ( - hasattr(self._container_package, "aux") - and self._container_package.aux.structure.layered - ) - ): - # auxiliary variables may appear with aux variable name as keyword - aux_vars = self._container_package.auxiliary.get_data() - if aux_vars is not None: - for var_name in list(aux_vars[0])[1:]: - self.datasets_keyword[(var_name,)] = ( - self._container_package.aux.structure - ) - - comments = [] - - # capture any initial comments - initial_comment = MFComment("", "", 0) - fd_block = fd - line = fd_block.readline() - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.split_data_line(line) - post_data_comments = MFComment("", "", self._simulation_data, 0) - while MFComment.is_comment(line, True): - initial_comment.add_text(line) - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - - # if block not empty - external_file_info 
= None - if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == "END"): - if arr_line[0].lower() == "open/close": - # open block contents from external file - fd_block.readline() - root_path = self._simulation_data.mfpath.get_sim_path() - try: - file_name = os.path.split(arr_line[1])[-1] - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f' opening external file "{file_name}"...' - ) - external_file_info = arr_line - except: - type_, value_, traceback_ = sys.exc_info() - message = f'Error reading external file specified in line "{line}"' - raise MFDataException( - self._container_package.model_name, - self._container_package._get_pname(), - self.path, - "reading external file", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ) - if len(self.structure.data_structures) <= 1: - # load a single data set - dataset = self.datasets[next(iter(self.datasets))] - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading data {dataset.structure.name}..." - ) - next_line = dataset.load( - line, - fd_block, - self.block_headers[-1], - initial_comment, - external_file_info, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format( - dataset.structure.name, - self.structure.name, - fd_block.name, - ), - ) - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - - if next_line[1] is not None: - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - else: - arr_line = "" - # capture any trailing comments - dataset.post_data_comments = post_data_comments - while arr_line and ( - len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" - ): - next_line[1] = fd_block.readline().strip() - arr_line = datautil.PyListUtil.split_data_line( - next_line[1] - ) - if arr_line and ( - len(next_line[1]) <= 2 - or arr_line[0][:3].upper() != "END" - ): - post_data_comments.add_text(" ".join(arr_line)) - else: - # look for keyword and store line as data or comment - try: - key, results = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - except MFInvalidTransientBlockHeaderException as e: - warning_str = f"WARNING: {e}" - print(warning_str) - self.block_headers.pop() - return - - self._save_comments(arr_line, line, key, comments) - if results[1] is None or results[1][:3].upper() != "END": - # block consists of unordered datasets - # load the data sets out of order based on - # initial constants - line = " " - while line != "": - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line: - # determine if at end of block - if ( - len(arr_line[0]) > 2 - and arr_line[0][:3].upper() == "END" - ): - break - # look for keyword and store line as data o - # r comment - key, result = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - self._save_comments(arr_line, line, key, comments) - if ( - result[1] is not None - and result[1][:3].upper() == "END" - ): - break - else: - # block empty, store empty array in block variables - empty_arr = [] - for ds in self.datasets.values(): - if isinstance(ds, mfdata.MFTransient): - transient_key = block_header.get_transient_key() - ds.set_data(empty_arr, key=transient_key) - self.loaded = True - self.is_valid() - - def _find_data_by_keyword(self, line, fd, initial_comment): - first_key = None - nothing_found = False - next_line = [True, line] - while next_line[0] and not nothing_found: - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - key = datautil.find_keyword(arr_line, self.datasets_keyword) - if key is not None: - ds_name = self.datasets_keyword[key].name - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading data {ds_name}...") - next_line = self.datasets[ds_name].load( - next_line[1], - fd, - self.block_headers[-1], - initial_comment, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format(ds_name, self.structure.name, fd.name), - ) - - # see if first item's name indicates a reference to - # another package - package_info_list = self._get_package_info( - self.datasets[ds_name] - 
) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[1]}..." - ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package( - pkg, pkg.filename, False - ) - if first_key is None: - first_key = key - nothing_found = False - elif ( - arr_line[0].lower() == "readasarrays" - and self.path[-1].lower() == "options" - and self._container_package.structure.read_as_arrays is False - ): - error_msg = ( - "ERROR: Attempting to read a ReadAsArrays " - "package as a non-ReadAsArrays " - "package {}".format(self.path) - ) - raise ReadAsArraysException(error_msg) - else: - nothing_found = True - - if first_key is None: - # look for recarrays. if there is a lone recarray in this block, - # use it by default - recarrays = self.structure.get_all_recarrays() - if len(recarrays) != 1: - return key, [None, None] - dataset = self.datasets[recarrays[0].name] - ds_result = dataset.load( - line, fd, self.block_headers[-1], initial_comment - ) - - # see if first item's name indicates a reference to another - # package - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading child package {package_info[0]}..." 
- ) - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - None, - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - - return recarrays[0].keyword, ds_result - else: - return first_key, next_line - - def _get_package_info(self, dataset): - if not dataset.structure.file_data: - return None - for index in range(0, len(dataset.structure.data_item_structures)): - data_item = dataset.structure.data_item_structures[index] - if ( - data_item.type == DatumType.keyword - or data_item.type == DatumType.string - ): - item_name = data_item.name - package_type = item_name[:-1] - model_type = self._model_or_sim.structure.model_type - # not all packages have the same naming convention - # try different naming conventions to find the appropriate - # package - package_types = [ - package_type, - f"{self._container_package.package_type}" - f"{package_type}", - ] - package_type_found = None - for ptype in package_types: - if ( - PackageContainer.package_factory(ptype, model_type) - is not None - ): - package_type_found = ptype - break - if package_type_found is not None: - try: - data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'getting data from "{}" ' - 'in block "{}".'.format( - dataset.structure.name, self.structure.name - ), - ) - package_info_list = [] - if isinstance(data, np.recarray): - for row in data: - self._add_to_info_list( - package_info_list, - row[index], - package_type_found, - ) - else: - self._add_to_info_list( - package_info_list, data, package_type_found - ) - - return package_info_list - return None - - def _add_to_info_list( - self, package_info_list, file_location, package_type_found - ): - file_path, file_name = os.path.split(file_location) - dict_package_name = f"{package_type_found}_{self.path[-2]}" - package_info_list.append( - ( - package_type_found, - file_name, - file_path, - dict_package_name, - ) - ) - - def _save_comments(self, arr_line, line, key, comments): - # FIX: Save these comments somewhere in the data set - if key not in self.datasets_keyword: - if MFComment.is_comment(key, True): - if comments: - comments.append("\n") - comments.append(arr_line) - - def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes block to a file object. - - Parameters - ---------- - fd : file object - File object to write to. 
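-        ext_file_action : ExtFileAction
-            Action to take with external files if the simulation or
-            model path has changed (default is
-            ExtFileAction.copy_relative_paths).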
- - """ - # never write an empty block - is_empty = self.is_empty() - if ( - is_empty - and self.structure.name.lower() != "exchanges" - and self.structure.name.lower() != "options" - and self.structure.name.lower() != "sources" - and self.structure.name.lower() != "stressperioddata" - ): - return - if self.structure.repeating(): - repeating_datasets = self._find_repeating_datasets() - for repeating_dataset in repeating_datasets: - # resolve any missing block headers - self._add_missing_block_headers(repeating_dataset) - for block_header in sorted(self.block_headers): - # write block - self._write_block(fd, block_header, ext_file_action) - else: - self._write_block(fd, self.block_headers[0], ext_file_action) - - def _add_missing_block_headers(self, repeating_dataset): - key_data_list = repeating_dataset.get_active_key_list() - # assemble a dictionary of data keys and empty keys - key_dict = {} - for key in key_data_list: - key_dict[key[0]] = True - for key, value in repeating_dataset.empty_keys.items(): - if value: - key_dict[key] = True - for key in key_dict.keys(): - has_data = repeating_dataset.has_data(key) - empty_key = ( - key in repeating_dataset.empty_keys - and repeating_dataset.empty_keys[key] - ) - if not self.header_exists(key) and (has_data or empty_key): - self._build_repeating_header([key]) - - def header_exists(self, key, data_path=None): - if not isinstance(key, list): - if key is None: - return - comp_key_list = [key] - else: - comp_key_list = key - for block_header in self.block_headers: - transient_key = block_header.get_transient_key(data_path) - if transient_key is True: - return - for comp_key in comp_key_list: - if transient_key is not None and transient_key == comp_key: - return True - return False - - def set_all_data_external( - self, - base_name, - check_data=True, - external_data_folder=None, - binary=False, - ): - """Sets the block's list and array data to be stored externally, - base_name is external file name's prefix, check_data determines - if data error checking is enabled during this process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - base_name : str - Base file name of external files where data will be written to. - check_data : bool - Whether to do data error checking. 
- external_data_folder - Folder where external data will be stored - binary: bool - Whether file will be stored as binary - - """ - - for key, dataset in self.datasets.items(): - lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( - dataset, mfdataplist.MFPandasList - ) - if ( - isinstance(dataset, mfdataarray.MFArray) - or (lst_data and dataset.structure.type == DatumType.recarray) - and dataset.enabled - ): - if not binary or ( - lst_data - and ( - dataset.data_dimensions.package_dim.boundnames() - or not dataset.structure.basic_item - ) - ): - ext = "txt" - binary = False - else: - ext = "bin" - file_path = f"{base_name}_{dataset.structure.name}.{ext}" - replace_existing_external = False - if external_data_folder is not None: - # get simulation root path - root_path = self._simulation_data.mfpath.get_sim_path() - # get model relative path, if it exists - if isinstance(self._model_or_sim, ModelInterface): - name = self._model_or_sim.name - rel_path = ( - self._simulation_data.mfpath.model_relative_path[ - name - ] - ) - if rel_path is not None: - root_path = os.path.join(root_path, rel_path) - full_path = os.path.join(root_path, external_data_folder) - if not os.path.exists(full_path): - # create new external data folder - os.makedirs(full_path) - file_path = os.path.join(external_data_folder, file_path) - replace_existing_external = True - dataset.store_as_external_file( - file_path, - replace_existing_external=replace_existing_external, - check_data=check_data, - binary=binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the block's list and array data to be stored internally, - check_data determines if data error checking is enabled during this - process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - check_data : bool - Whether to do data error checking. 
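-
-        Example
-        -------
-        A minimal sketch; ``block`` is assumed to be an existing
-        MFBlock whose datasets are currently stored in external files:
-
-        >>> block.set_all_data_internal(check_data=False)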
- - """ - - for key, dataset in self.datasets.items(): - if ( - isinstance(dataset, mfdataarray.MFArray) - or ( - ( - isinstance(dataset, mfdatalist.MFList) - or isinstance(dataset, mfdataplist.MFPandasList) - ) - and dataset.structure.type == DatumType.recarray - ) - and dataset.enabled - ): - dataset.store_internal(check_data=check_data) - - def _find_repeating_datasets(self): - repeating_datasets = [] - for key, dataset in self.datasets.items(): - if dataset.repeating: - repeating_datasets.append(dataset) - return repeating_datasets - - def _prepare_external(self, fd, file_name, binary=False): - fd_main = fd - fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) - # resolve full file and folder path - fd_file_path = os.path.join(fd_path, file_name) - fd_folder_path = os.path.split(fd_file_path)[0] - if fd_folder_path != "": - if not os.path.exists(fd_folder_path): - # create new external data folder - os.makedirs(fd_folder_path) - return fd_main, fd_file_path - - def _write_block(self, fd, block_header, ext_file_action): - transient_key = None - basic_list = False - dataset_one = list(self.datasets.values())[0] - if isinstance( - dataset_one, - (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), - ): - basic_list = True - for dataset in self.datasets.values(): - assert isinstance( - dataset, - ( - mfdataplist.MFPandasList, - mfdataplist.MFPandasTransientList, - ), - ) - # write block header - block_header.write_header(fd) - if len(block_header.data_items) > 0: - transient_key = block_header.get_transient_key() - - # gather data sets to write - data_set_output = [] - data_found = False - for key, dataset in self.datasets.items(): - try: - if transient_key is None: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" writing data {dataset.structure.name}..." - ) - if basic_list: - ext_fname = dataset.external_file_name() - if ext_fname is not None: - binary = dataset.binary_ext_data() - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry(fd, fd_main=fd_main) - fd = fd_main - else: - dataset.write_file_entry(fd) - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - else: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - " writing data {} ({}).." 
".".format( - dataset.structure.name, transient_key - ) - ) - if basic_list: - ext_fname = dataset.external_file_name(transient_key) - if ext_fname is not None: - binary = dataset.binary_ext_data(transient_key) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, ext_fname, binary - ) - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - fd_main=fd_main, - ) - fd = fd_main - else: - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - ) - else: - if dataset.repeating: - output = dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action - ) - if output is not None: - data_set_output.append(output) - data_found = True - else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action - ) - ) - data_found = True - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message=( - "Error occurred while writing data " - f'"{dataset.structure.name}" in block ' - f'"{self.structure.name}" to file "{fd.name}"' - ), - ) - if not data_found: - return - if not basic_list: - # write block header - block_header.write_header(fd) - - if self.external_file_name is not None: - indent_string = self._simulation_data.indent_string - fd.write( - f"{indent_string}open/close " - f'"{self.external_file_name}"\n' - ) - # write block contents to external file - fd_main, fd = self._prepare_external( - fd, self.external_file_name - ) - # write data sets - for output in data_set_output: - fd.write(output) - - # write trailing comments - pth = block_header.blk_trailing_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - if self.external_file_name is not None and not basic_list: - # switch back writing to package file - fd.close() - fd = fd_main - - # write block footer - block_header.write_footer(fd) - - # write post block comments - pth = block_header.blk_post_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - # write extra line if comments are off - if not self._simulation_data.comments_on: - fd.write("\n") - - def is_allowed(self): - """Determine if block is valid based on the values of dependent - MODFLOW variables.""" - if self.structure.variable_dependant_path: - # fill in empty part of the path with the current path - if len(self.structure.variable_dependant_path) == 3: - dependant_var_path = ( - self.path[0], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 2: - dependant_var_path = ( - self.path[0], - self.path[1], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 1: - dependant_var_path = ( - self.path[0], - self.path[1], - self.path[2], - ) + self.structure.variable_dependant_path - else: - dependant_var_path = None - - # get dependency - dependant_var = None - mf_data = self._simulation_data.mfdata - if dependant_var_path in mf_data: - dependant_var = mf_data[dependant_var_path] - - # resolve dependency - if self.structure.variable_value_when_active[0] == "Exists": - exists = self.structure.variable_value_when_active[1] - if dependant_var and exists.lower() == "true": - return True - elif not dependant_var and exists.lower() == "false": - return True - else: - return False - elif not dependant_var: - return False - elif 
self.structure.variable_value_when_active[0] == ">": - min_val = self.structure.variable_value_when_active[1] - if dependant_var > float(min_val): - return True - else: - return False - elif self.structure.variable_value_when_active[0] == "<": - max_val = self.structure.variable_value_when_active[1] - if dependant_var < float(max_val): - return True - else: - return False - return True - - def is_valid(self): - """ - Returns true if the block is valid. - """ - # check data sets - for dataset in self.datasets.values(): - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid: - return False - # check variables - for block_header in self.block_headers: - for dataset in block_header.data_items: - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid(): - return False - - -class MFPackage(PackageInterface): - """ - Provides an interface for the user to specify data to build a package. - - Parameters - ---------- - parent : MFModel, MFSimulation, or MFPackage - The parent model, simulation, or package containing this package - package_type : str - String defining the package type - filename : str or PathLike - Name or path of file where this package is stored - quoted_filename : str - Filename with quotes around it when there is a space in the name - pname : str - Package name - loading_package : bool - Whether or not to add this package to the parent container's package - list during initialization - - Attributes - ---------- - blocks : dict - Dictionary of blocks contained in this package by block name - path : tuple - Data dictionary path to this package - structure : PackageStructure - Describes the blocks and data contain in this package - dimensions : PackageDimension - Resolves data dimensions for data within this package - - """ - - def __init__( - self, - parent, - package_type, - filename=None, - pname=None, - loading_package=False, - **kwargs, - ): - parent_file = kwargs.pop("parent_file", None) - if isinstance(parent, MFPackage): - self.model_or_sim = parent.model_or_sim - self.parent_file = parent - elif parent_file is not None: - self.model_or_sim = parent - self.parent_file = parent_file - else: - self.model_or_sim = parent - self.parent_file = None - _internal_package = kwargs.pop("_internal_package", False) - if _internal_package: - self.internal_package = True - else: - self.internal_package = False - self._data_list = [] - self._package_type = package_type - if self.model_or_sim.type == "Model" and package_type.lower() != "nam": - self.model_name = self.model_or_sim.name - else: - self.model_name = None - - # a package must have a dfn_file_name - if not hasattr(self, "dfn_file_name"): - self.dfn_file_name = "" - - if ( - self.model_or_sim.type != "Model" - and self.model_or_sim.type != "Simulation" - ): - message = ( - "Invalid model_or_sim parameter. Expecting either a " - 'model or a simulation. 
Instead type "{}" was ' - "given.".format(type(self.model_or_sim)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self._package_container = PackageContainer( - self.model_or_sim.simulation_data - ) - self.simulation_data = self.model_or_sim.simulation_data - - self.blocks = {} - self.container_type = [] - self.loading_package = loading_package - if pname is not None: - if not isinstance(pname, str): - message = ( - "Invalid pname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(pname)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self.package_name = pname.lower() - else: - self.package_name = None - - if filename is None: - if self.model_or_sim.type == "Simulation": - # filename uses simulation base name - base_name = os.path.basename( - os.path.normpath(self.model_or_sim.name) - ) - self._filename = f"{base_name}.{package_type}" - else: - # filename uses model base name - self._filename = f"{self.model_or_sim.name}.{package_type}" - else: - if not isinstance(filename, (str, os.PathLike)): - message = ( - "Invalid fname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(filename)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - self._filename = datautil.clean_filename( - str(filename).replace("\\", "/") - ) - self.path, self.structure = self.model_or_sim.register_package( - self, not loading_package, pname is None, filename is None - ) - self.dimensions = self.create_package_dimensions() - - if self.path is None: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package type {} failed to register property." - " {}".format(self._package_type, self.path) - ) - if self.parent_file is not None: - self.container_type.append(PackageContainerType.package) - # init variables that may be used later - self.post_block_comments = None - self.last_error = None - self.bc_color = "black" - self.__inattr = False - self._child_package_groups = {} - child_builder_call = kwargs.pop("child_builder_call", None) - if ( - self.parent_file is not None - and child_builder_call is None - and package_type in self.parent_file._child_package_groups - ): - # initialize as part of the parent's child package group - chld_pkg_grp = self.parent_file._child_package_groups[package_type] - chld_pkg_grp.init_package(self, self._filename, False) - - # remove any remaining valid kwargs - key_list = list(kwargs.keys()) - for key in key_list: - if "filerecord" in key and hasattr(self, f"{key}"): - kwargs.pop(f"{key}") - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = ( - f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
- ) - raise FlopyException(excpt_str) - - def __init_subclass__(cls): - """Register package type""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self._get_pname(), - ) - return - - if all( - hasattr(self, attr) for attr in ["model_or_sim", "_package_type"] - ): - if hasattr(self.model_or_sim, "_mg_resync"): - if not self.model_or_sim._mg_resync: - self.model_or_sim._mg_resync = self._mg_resync - - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - @property - def filename(self): - """Package's file name.""" - return self._filename - - @property - def quoted_filename(self): - """Package's file name with quotes if there is a space.""" - if " " in self._filename: - return f'"{self._filename}"' - return self._filename - - @filename.setter - def filename(self, fname): - """Package's file name.""" - if ( - isinstance(self.parent_file, MFPackage) - and self.package_type in self.parent_file._child_package_groups - ): - fname = datautil.clean_filename(fname) - try: - child_pkg_group = self.parent_file._child_package_groups[ - self.structure.file_type - ] - child_pkg_group._update_filename(self._filename, fname) - except Exception: - print( - "WARNING: Unable to update file name for parent" - f"package of {self.package_name}." - ) - if self.model_or_sim is not None and fname is not None: - if self._package_type != "nam": - self.model_or_sim.update_package_filename(self, fname) - self._filename = fname - - @property - def package_type(self): - """String describing type of package""" - return self._package_type - - @property - def name(self): - """Name of package""" - return [self.package_name] - - @name.setter - def name(self, name): - """Name of package""" - self.package_name = name - - @property - def parent(self): - """Parent package""" - return self.model_or_sim - - @parent.setter - def parent(self, parent): - """Parent package""" - assert False, "Do not use this setter to set the parent" - - @property - def plottable(self): - """If package is plottable""" - if self.model_or_sim.type == "Simulation": - return False - else: - return True - - @property - def output(self): - """ - Method to get output associated with a specific package - - Returns - ------- - MF6Output object - """ - return MF6Output(self) - - @property - def data_list(self): - """List of data in this package.""" - # return [data_object, data_object, ...] - return self._data_list - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. 
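The `output` property above is the user-facing hook: it wraps the package (or its parent model) in an `MF6Output` helper. A minimal usage sketch against a model built like the tutorial's `uzf01` (the workspace path is an assumption; the model and package names come from that tutorial):

    import flopy

    sim = flopy.mf6.MFSimulation.load(sim_ws="uzf01_ws")  # assumed path
    gwf = sim.get_model("uzf01")

    # model-level output registered through the OC package
    head = gwf.output.head().get_data(kstpkper=(9, 0))
    ghb_flows = gwf.output.budget().get_data(text="GHB")

    # package-level output, e.g. the UZF budget file set by budget_filerecord
    uzf_budget = gwf.uzf.output.budget().get_data(idx=0)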
- """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. - type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def add_package(self, package): - pkg_type = package.package_type.lower() - if pkg_type in self._package_container.package_type_dict: - for existing_pkg in self._package_container.package_type_dict[ - pkg_type - ]: - if existing_pkg is package: - # do not add the same package twice - return - self._package_container.add_package(package) - - def _get_aux_data(self, aux_names): - if hasattr(self, "stress_period_data"): - spd = self.stress_period_data.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "packagedata"): - pd = self.packagedata.get_data() - if aux_names[0][1] in pd.dtype.names: - return pd - if hasattr(self, "perioddata"): - spd = self.perioddata.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "aux"): - return self.aux.get_data() - return None - - def _boundnames_active(self): - if hasattr(self, "boundnames"): - if self.boundnames.get_data(): - return True - return False - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Data check, returns True on success. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. 
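For orientation, a short sketch of the lookup and checking entry points defined above, reusing the `gwf` model from the previous sketch (package names follow the tutorial model; anything else is an assumption):

    dis = gwf.get_package("dis")                        # match by type
    uzf = gwf.get_package("uzf", type_only=True)        # type-only match
    obs = gwf.get_package("head_obs", name_only=True)   # pname-only match

    # run the (deprecated, see the warning above) data checks on one package
    chk = uzf.check(verbose=True, level=1)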
- """ - - if checktype is None: - checktype = mf6check - # do general checks - chk = super().check(f, verbose, level, checktype) - - # do mf6 specific checks - if hasattr(self, "auxiliary"): - # auxiliary variable check - # check if auxiliary variables are defined - aux_names = self.auxiliary.get_data() - if aux_names is not None and len(aux_names[0]) > 1: - num_aux_names = len(aux_names[0]) - 1 - # check for stress period data - aux_data = self._get_aux_data(aux_names) - if aux_data is not None and len(aux_data) > 0: - # make sure the check object exists - if chk is None: - chk = self._get_check(f, verbose, level, checktype) - if isinstance(aux_data, dict): - aux_datasets = list(aux_data.values()) - else: - aux_datasets = [aux_data] - dataset_type = "unknown" - for dataset in aux_datasets: - if isinstance(dataset, np.recarray): - dataset_type = "recarray" - break - elif isinstance(dataset, np.ndarray): - dataset_type = "ndarray" - break - # if aux data is in a list - if dataset_type == "recarray": - # check for time series data - time_series_name_dict = {} - if hasattr(self, "ts") and hasattr( - self.ts, "time_series_namerecord" - ): - # build dictionary of time series data variables - ts_nr = self.ts.time_series_namerecord.get_data() - if ts_nr is not None: - for item in ts_nr: - if len(item) > 0 and item[0] is not None: - time_series_name_dict[item[0]] = True - # auxiliary variables are last unless boundnames - # defined, then second to last - if self._boundnames_active(): - offset = 1 - else: - offset = 0 - - # loop through stress period datasets with aux data - for data in aux_datasets: - if isinstance(data, np.recarray): - for row in data: - row_size = len(row) - aux_start_loc = ( - row_size - num_aux_names - offset - 1 - ) - # loop through auxiliary variables - for idx, var in enumerate( - list(aux_names[0])[1:] - ): - # get index of current aux variable - data_index = aux_start_loc + idx - # verify auxiliary value is either - # numeric or time series variable - if ( - not datautil.DatumUtil.is_float( - row[data_index] - ) - and row[data_index] - not in time_series_name_dict - ): - desc = ( - f"Invalid non-numeric " - f"value " - f"'{row[data_index]}' " - f"in auxiliary data." - ) - chk._add_to_summary( - "Error", - desc=desc, - package=self.package_name, - ) - # else if stress period data is arrays - elif dataset_type == "ndarray": - # loop through auxiliary stress period datasets - for data in aux_datasets: - # verify auxiliary value is either numeric or time - # array series variable - if isinstance(data, np.ndarray): - val = np.isnan(np.sum(data)) - if val: - desc = ( - "One or more nan values were " - "found in auxiliary data." 
- ) - chk._add_to_summary( - "Warning", - desc=desc, - package=self.package_name, - ) - return chk - - def _get_nan_exclusion_list(self): - excl_list = [] - if hasattr(self, "stress_period_data"): - spd_struct = self.stress_period_data.structure - for item_struct in spd_struct.data_item_structures: - if item_struct.optional or item_struct.keystring_dict: - excl_list.append(item_struct.name) - return excl_list - - def _get_data_str(self, formal, show_data=True): - data_str = ( - "package_name = {}\nfilename = {}\npackage_type = {}" - "\nmodel_or_simulation_package = {}" - "\n{}_name = {}" - "\n".format( - self._get_pname(), - self._filename, - self.package_type, - self.model_or_sim.type.lower(), - self.model_or_sim.type.lower(), - self.model_or_sim.name, - ) - ) - if self.parent_file is not None and formal: - data_str = ( - f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" - ) - else: - data_str = f"{data_str}\n" - if show_data: - for block in self.blocks.values(): - if formal: - bl_repr = repr(block) - if len(bl_repr.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, repr(block) - ) - ) - else: - bl_str = str(block) - if len(bl_str.strip()) > 0: - data_str = ( - "{}Block {}\n--------------------\n{}" "\n".format( - data_str, block.structure.name, str(block) - ) - ) - return data_str - - def _get_pname(self): - if self.package_name is not None: - return str(self.package_name) - else: - return str(self._filename) - - def _get_block_header_info(self, line, path): - # init - header_variable_strs = [] - arr_clean_line = line.strip().split() - header_comment = MFComment( - "", path + (arr_clean_line[1],), self.simulation_data, 0 - ) - # break header into components - if len(arr_clean_line) < 2: - message = ( - "Block header does not contain a name. 
Name " - 'expected in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "parsing block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - elif len(arr_clean_line) == 2: - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - else: - # process text after block name - comment = False - for entry in arr_clean_line[2:]: - # if start of comment - if MFComment.is_comment(entry.strip()[0]): - comment = True - if comment: - header_comment.text = " ".join( - [header_comment.text, entry] - ) - else: - header_variable_strs.append(entry) - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - - def _update_size_defs(self): - # build temporary data lookup by name - data_lookup = {} - for block in self.blocks.values(): - for dataset in block.datasets.values(): - data_lookup[dataset.structure.name] = dataset - - # loop through all data - for block in self.blocks.values(): - for dataset in block.datasets.values(): - # if data shape is 1-D - if ( - dataset.structure.shape - and len(dataset.structure.shape) == 1 - ): - # if shape name is data in this package - if dataset.structure.shape[0] in data_lookup: - size_def = data_lookup[dataset.structure.shape[0]] - size_def_name = size_def.structure.name - - if isinstance(dataset, mfdata.MFTransient): - # for transient data always use the maximum size - new_size = -1 - for key in dataset.get_active_key_list(): - try: - data = dataset.get_data(key=key[0]) - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - data_len = len(data) - if data_len > new_size: - new_size = data_len - else: - # for all other data set max to size - new_size = -1 - try: - data = dataset.get_data() - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - new_size = len(dataset.get_data()) - - if size_def.get_data() is None: - current_size = -1 - else: - current_size = size_def.get_data() - - if new_size > current_size: - # store current size - size_def.set_data(new_size) - - # informational message to the user - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "INFORMATION: {} in {} changed to {} " - "based on size of {}".format( - size_def_name, - size_def.structure.path[:-1], - new_size, - dataset.structure.name, - ) - ) - - def inspect_cells(self, cell_list, stress_period=None): - """ - Inspect model cells. Returns package data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data, only return data from this stress period. If - not specified or None, all stress period data will be returned. 
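A sketch of `inspect_cells` as documented above, on the tutorial's single-column grid (cell ids are zero-based (layer, row, col) tuples; the two cells chosen here are arbitrary):

    results = gwf.uzf.inspect_cells([(0, 0, 0), (49, 0, 0)])
    for out in results:
        print(out.data_header, len(out.data_entries))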
- - Returns - ------- - output : array - Array containing inspection results - - """ - data_found = [] - - # loop through blocks - local_index_names = [] - local_index_blocks = [] - local_index_values = [] - local_index_cellids = [] - # loop through blocks in package - for block in self.blocks.values(): - # loop through data in block - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - # handle list data - cellid_column = None - local_index_name = None - # loop through list data column definitions - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if index == 0 and data_item.type == DatumType.integer: - local_index_name = data_item.name - # look for cellid column in list data row - if isinstance(data_item, MFDataItemStructure) and ( - data_item.is_cellid or data_item.possible_cellid - ): - cellid_column = index - break - if cellid_column is not None: - data_output = DataSearchOutput(dataset.path) - local_index_vals = [] - local_index_cells = [] - # get data - if isinstance(dataset, mfdatalist.MFTransientList): - # data may be in multiple transient blocks, get - # data from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get data - main_data = {-1: dataset.get_data()} - - # loop through each dataset - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = value.dtype.names - # loop through list data rows - for line in value: - # loop through list of cells we are searching - # for - for cell in cell_list: - if isinstance( - line[cellid_column], tuple - ) and cellids_equal( - line[cellid_column], cell - ): - # save data found - data_output.data_entries.append(line) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append( - key - ) - if datautil.DatumUtil.is_int(line[0]): - # save index data for further - # processing. 
assuming index is - # always first entry - local_index_vals.append(line[0]) - local_index_cells.append(cell) - - if ( - local_index_name is not None - and len(local_index_vals) > 0 - ): - # capture index lookups for scanning related data - local_index_names.append(local_index_name) - local_index_blocks.append(block.path[-1]) - local_index_values.append(local_index_vals) - local_index_cellids.append(local_index_cells) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - elif isinstance(dataset, mfdataarray.MFArray): - # handle array data - data_shape = copy.deepcopy( - dataset.structure.data_item_structures[0].shape - ) - if dataset.path[-1] == "top": - # top is a special case where the two datasets - # need to be combined to get the correct layer top - model_grid = self.model_or_sim.modelgrid - main_data = {-1: model_grid.top_botm} - data_shape.append("nlay") - else: - if isinstance(dataset, mfdataarray.MFTransientArray): - # data may be in multiple blocks, get data from - # appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get a process data - main_data = {-1: dataset.get_data()} - if main_data is None: - continue - data_output = DataSearchOutput(dataset.path) - # loop through datasets - for key, array_data in main_data.items(): - if array_data is None: - continue - self.model_or_sim.match_array_cells( - cell_list, data_shape, array_data, key, data_output - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - - if len(local_index_names) > 0: - # look for data that shares the index value with data found - # for example a shared well or reach number - for block in self.blocks.values(): - # loop through data - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - data_item = dataset.structure.data_item_structures[0] - data_output = DataSearchOutput(dataset.path) - # loop through previous data found - for ( - local_index_name, - local_index_vals, - cell_ids, - local_block_name, - ) in zip( - local_index_names, - local_index_values, - local_index_cellids, - local_index_blocks, - ): - if local_block_name == block.path[-1]: - continue - if ( - isinstance(data_item, MFDataItemStructure) - and data_item.name == local_index_name - and data_item.type == DatumType.integer - ): - # matching data index type found, get data - if isinstance( - dataset, mfdatalist.MFTransientList - ): - # data may be in multiple blocks, get data - # from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block - main_data = {-1: dataset.get_data()} - # loop through the data - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = ( - value.dtype.names - ) - # loop through each row of data - for line in value: - # loop through the index values we are - # looking for - for index_val, cell_id in zip( - local_index_vals, cell_ids - ): - # try to match index values we are - # looking for to the data - if index_val == line[0]: - # save data found - data_output.data_entries.append( - line - ) - data_output.data_entry_ids.append( - index_val - ) - data_output.data_entry_cellids.append( - cell_id - ) - data_output.data_entry_stress_period.append( - key - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - return 
data_found
-
-    def remove(self):
-        """Removes this package from the simulation/model it is currently a
-        part of.
-        """
-        self.model_or_sim.remove_package(self)
-
-    def build_child_packages_container(self, pkg_type, filerecord):
-        """Builds a container object for any child packages. This method is
-        only intended for FloPy internal use."""
-        # get package class
-        package_obj = PackageContainer.package_factory(
-            pkg_type, self.model_or_sim.model_type
-        )
-        # create child package object
-        child_pkgs_name = f"utl{pkg_type}packages"
-        child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "")
-        if child_pkgs_obj is None and self.model_or_sim.model_type is None:
-            # simulation level object, try just the package type in the name
-            child_pkgs_name = f"{pkg_type}packages"
-            child_pkgs_obj = PackageContainer.package_factory(
-                child_pkgs_name, ""
-            )
-        if child_pkgs_obj is None:
-            # see if the package is part of one of the supported model types
-            for model_type in MFStructure().sim_struct.model_types:
-                child_pkgs_name = f"{model_type}{pkg_type}packages"
-                child_pkgs_obj = PackageContainer.package_factory(
-                    child_pkgs_name, ""
-                )
-                if child_pkgs_obj is not None:
-                    break
-        child_pkgs = child_pkgs_obj(
-            self.model_or_sim, self, pkg_type, filerecord, None, package_obj
-        )
-        setattr(self, pkg_type, child_pkgs)
-        self._child_package_groups[pkg_type] = child_pkgs
-
-    def _get_dfn_name_dict(self):
-        dfn_name_dict = {}
-        item_num = 0
-        for item in self.structure.dfn_list:
-            if len(item) > 1:
-                item_name = item[1].split()
-                if len(item_name) > 1 and item_name[0] == "name":
-                    dfn_name_dict[item_name[1]] = item_num
-            item_num += 1
-        return dfn_name_dict
-
-    def build_child_package(self, pkg_type, data, parameter_name, filerecord):
-        """Builds a child package.
This method is only intended for FloPy - internal use.""" - if not hasattr(self, pkg_type): - self.build_child_packages_container(pkg_type, filerecord) - if data is not None: - package_group = getattr(self, pkg_type) - # build child package file name - child_path = package_group.next_default_file_path() - # create new empty child package - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - package = package_obj( - self, filename=child_path, child_builder_call=True - ) - assert hasattr(package, parameter_name) - - if isinstance(data, dict): - # order data correctly - dfn_name_dict = package._get_dfn_name_dict() - ordered_data_items = [] - for key, value in data.items(): - if key in dfn_name_dict: - ordered_data_items.append( - [dfn_name_dict[key], key, value] - ) - else: - ordered_data_items.append([999999, key, value]) - ordered_data_items = sorted( - ordered_data_items, key=lambda x: x[0] - ) - - # evaluate and add data to package - unused_data = {} - for order, key, value in ordered_data_items: - # if key is an attribute of the child package - if isinstance(key, str) and hasattr(package, key): - # set child package attribute - child_data_attr = getattr(package, key) - if isinstance(child_data_attr, mfdatalist.MFList): - child_data_attr.set_data(value, autofill=True) - elif isinstance(child_data_attr, mfdata.MFData): - child_data_attr.set_data(value) - elif key == "fname" or key == "filename": - child_path = value - package._filename = value - else: - setattr(package, key, value) - else: - unused_data[key] = value - if unused_data: - setattr(package, parameter_name, unused_data) - else: - setattr(package, parameter_name, data) - - # append package to list - package_group.init_package(package, child_path) - return package - - def build_mfdata(self, var_name, data=None): - """Returns the appropriate data type object (mfdatalist, mfdataarray, - or mfdatascalar) given that object the appropriate structure (looked - up based on var_name) and any data supplied. This method is for - internal FloPy library use only. - - Parameters - ---------- - var_name : str - Variable name - - data : many supported types - Data contained in this object - - Returns - ------- - data object : MFData subclass - - """ - if self.loading_package: - data = None - for key, block in self.structure.blocks.items(): - if var_name in block.data_structures: - if block.name not in self.blocks: - self.blocks[block.name] = MFBlock( - self.simulation_data, - self.dimensions, - block, - self.path + (key,), - self.model_or_sim, - self, - ) - dataset_struct = block.data_structures[var_name] - var_path = self.path + (key, var_name) - ds = self.blocks[block.name].add_dataset( - dataset_struct, data, var_path - ) - self._data_list.append(ds) - return ds - - message = 'Unable to find variable "{}" in package ' '"{}".'.format( - var_name, self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "building data objects", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - def set_model_relative_path(self, model_ws): - """Sets the model path relative to the simulation's path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. 
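`build_child_package` is the machinery behind attribute-style child package creation; the `ghb.obs.initialize(...)` call in the tutorial at the top of this patch runs through it. A compact sketch, reusing the `gwf` model from the earlier sketch (the observation name is an assumption):

    ghb = gwf.get_package("ghbg")
    ghb.obs.initialize(
        filename="uzf01.ghb.obs",
        digits=10,
        print_input=True,
        continuous={"uzf01.ghb.obs.csv": [("h_100_1_1", "GHB", (99, 0, 0))]},
    )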
- - """ - # update blocks - for key, block in self.blocks.items(): - block.set_model_relative_path(model_ws) - # update sub-packages - for package in self._package_container.packagelist: - package.set_model_relative_path(model_ws) - - def set_all_data_external( - self, - check_data=True, - external_data_folder=None, - base_name=None, - binary=False, - ): - """Sets the package's list and array data to be stored externally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - external_data_folder - Folder where external data will be stored - base_name: str - Base file name prefix for all files - binary: bool - Whether file will be stored as binary - """ - # set blocks - for key, block in self.blocks.items(): - file_name = os.path.split(self.filename)[1] - if base_name is not None: - file_name = f"{base_name}_{file_name}" - block.set_all_data_external( - file_name, - check_data, - external_data_folder, - binary, - ) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_external( - check_data, - external_data_folder, - base_name, - binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the package's list and array data to be stored internally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - - """ - # set blocks - for key, block in self.blocks.items(): - block.set_all_data_internal(check_data) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_internal(check_data) - - def load(self, strict=True): - """Loads the package from file. - - Parameters - ---------- - strict : bool - Enforce strict checking of data. - - Returns - ------- - success : bool - - """ - # open file - try: - fd_input_file = open( - datautil.clean_filename(self.get_file_path()), "r" - ) - except OSError as e: - if e.errno == errno.ENOENT: - message = "File {} of type {} could not be opened.".format( - self.get_file_path(), self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self.package_name, - self.path, - "loading package file", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - try: - self._load_blocks(fd_input_file, strict) - except ReadAsArraysException as err: - fd_input_file.close() - raise ReadAsArraysException(err) - # close file - fd_input_file.close() - - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # return validity of file - return self.is_valid() - - def is_valid(self): - """Returns whether or not this package is valid. 
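A sketch of the external-storage switches defined above; `external_data_folder`, `base_name`, and `binary` are optional, and the folder and base names used here are assumptions:

    # move this package's arrays and lists into external text files
    gwf.uzf.set_all_data_external(
        check_data=True,
        external_data_folder="external",
        base_name="uzf01",
        binary=False,
    )
    # and bring everything back into the package file itself
    gwf.uzf.set_all_data_internal()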
- - Returns - ------- - is valid : bool - - """ - # Check blocks - for block in self.blocks.values(): - # Non-optional blocks must be enabled - if ( - block.structure.number_non_optional_data() > 0 - and not block.enabled - and block.is_allowed() - ): - self.last_error = ( - f'Required block "{block.block_header.name}" not enabled' - ) - return False - # Enabled blocks must be valid - if block.enabled and not block.is_valid: - self.last_error = f'Invalid block "{block.block_header.name}"' - return False - - return True - - def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): - # init - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = ( - MFComment("", self.path, self.simulation_data) - ) - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - blocks_read = 0 - found_first_block = False - line = " " - while line != "": - line = fd_input_file.readline() - clean_line = line.strip() - # If comment or empty line - if MFComment.is_comment(clean_line, True): - self._store_comment(line, found_first_block) - elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": - # parse block header - try: - block_header_info = self._get_block_header_info( - line, self.path - ) - except MFDataException as mfde: - message = ( - "An error occurred while loading block header " - 'in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "loading block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - mfde, - ) - - # if there is more than one possible block with the same name, - # resolve the correct block to use - block_key = block_header_info.name.lower() - block_num = 1 - possible_key = f"{block_header_info.name.lower()}-{block_num}" - if possible_key in self.blocks: - block_key = possible_key - block_header_name = block_header_info.name.lower() - while ( - block_key in self.blocks - and not self.blocks[block_key].is_allowed() - ): - block_key = f"{block_header_name}-{block_num}" - block_num += 1 - - if block_key not in self.blocks: - # block name not recognized, load block as comments and - # issue a warning - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" is not a valid block ' - "name for file type " - "{}.".format(block_key, self.package_type) - ) - print(warning_str) - self._store_comment(line, found_first_block) - while line != "": - line = fd_input_file.readline() - self._store_comment(line, found_first_block) - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line and ( - len(arr_line[0]) <= 2 - or arr_line[0][:3].upper() == "END" - ): - break - else: - found_first_block = True - skip_block = False - cur_block = self.blocks[block_key] - if cur_block.loaded: - # Only blocks defined as repeating are allowed to have - # multiple entries - header_name = block_header_info.name - if not self.structure.blocks[ - header_name.lower() - ].repeating(): - # warn and skip block - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" has ' - "multiple entries and is not " - "intended to be a repeating " - "block ({} package" - ")".format(header_name, self.package_type) - ) - print(warning_str) - skip_block = True - bhs = cur_block.structure.block_header_structure - bhval = block_header_info.variable_strings - if 
( - len(bhs) > 0 - and len(bhval) > 0 - and bhs[0].name == "iper" - ): - nper = self.simulation_data.mfdata[ - ("tdis", "dimensions", "nper") - ].get_data() - bhval_int = datautil.DatumUtil.is_int(bhval[0]) - if not bhval_int or int(bhval[0]) > nper: - # skip block when block stress period is greater - # than nper - skip_block = True - - if not skip_block: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - f" loading block {cur_block.structure.name}..." - ) - # reset comments - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - cur_block.load( - block_header_info, fd_input_file, strict - ) - - # write post block comment - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - blocks_read += 1 - if blocks_read >= max_blocks: - break - else: - # treat skipped block as if it is all comments - arr_line = datautil.PyListUtil.split_data_line( - clean_line - ) - self.post_block_comments.add_text(str(line), True) - while arr_line and ( - len(line) <= 2 or arr_line[0][:3].upper() != "END" - ): - line = fd_input_file.readline() - arr_line = datautil.PyListUtil.split_data_line( - line.strip() - ) - if arr_line: - self.post_block_comments.add_text( - str(line), True - ) - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - else: - if not ( - len(clean_line) == 0 - or (len(line) > 2 and line[:3].upper() == "END") - ): - # Record file location of beginning of unresolved text - # treat unresolved text as a comment for now - self._store_comment(line, found_first_block) - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes the package to a file. - - Parameters - ---------- - ext_file_action : ExtFileAction - How to handle pathing of external data files. - """ - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # create any folders in path - package_file_path = self.get_file_path() - package_folder = os.path.split(package_file_path)[0] - if package_folder and not os.path.isdir(package_folder): - os.makedirs(os.path.split(package_file_path)[0]) - - # open file - fd = open(package_file_path, "w") - - # write flopy header - if self.simulation_data.write_headers: - dt = datetime.datetime.now() - header = ( - "# File generated by Flopy version {} on {} at {}." - "\n".format( - __version__, - dt.strftime("%m/%d/%Y"), - dt.strftime("%H:%M:%S"), - ) - ) - fd.write(header) - - # write blocks - self._write_blocks(fd, ext_file_action) - - fd.close() - - def create_package_dimensions(self): - """Creates a package dimensions object. For internal FloPy library - use. - - Returns - ------- - package dimensions : PackageDimensions - - """ - model_dims = None - if self.container_type[0] == PackageContainerType.model: - model_dims = [ - modeldimensions.ModelDimensions( - self.path[0], self.simulation_data - ) - ] - else: - # this is a simulation file that does not correspond to a specific - # model. 
figure out which model to use and return a dimensions - # object for that model - if self.dfn_file_name[0:3] == "exg": - exchange_rec_array = self.simulation_data.mfdata[ - ("nam", "exchanges", "exchanges") - ].get_data() - if exchange_rec_array is None: - return None - for exchange in exchange_rec_array: - if exchange[1].lower() == self._filename.lower(): - model_dims = [ - modeldimensions.ModelDimensions( - exchange[2], self.simulation_data - ), - modeldimensions.ModelDimensions( - exchange[3], self.simulation_data - ), - ] - break - elif ( - self.dfn_file_name[4:7] == "gnc" - and self.model_or_sim.type == "Simulation" - ): - # get exchange file name associated with gnc package - if self.parent_file is not None: - exg_file_name = self.parent_file.filename - else: - raise Exception( - "Can not create a simulation-level " - "gnc file without a corresponding " - "exchange file. Exchange file must be " - "created first." - ) - # get models associated with exchange file from sim nam file - try: - exchange_recarray_data = ( - self.model_or_sim.name_file.exchanges.get_data() - ) - except MFDataException as mfde: - message = ( - "An error occurred while retrieving exchange " - "data from the simulation name file. The error " - "occurred while processing gnc file " - f'"{self.filename}".' - ) - raise MFDataException( - mfdata_except=mfde, - package=self._get_pname(), - message=message, - ) - assert exchange_recarray_data is not None - model_1 = None - model_2 = None - for exchange in exchange_recarray_data: - if exchange[1] == exg_file_name: - model_1 = exchange[2] - model_2 = exchange[3] - - # assign models to gnc package - model_dims = [ - modeldimensions.ModelDimensions( - model_1, self.simulation_data - ), - modeldimensions.ModelDimensions( - model_2, self.simulation_data - ), - ] - elif self.parent_file is not None: - model_dims = [] - for md in self.parent_file.dimensions.model_dim: - model_name = md.model_name - model_dims.append( - modeldimensions.ModelDimensions( - model_name, self.simulation_data - ) - ) - else: - model_dims = [ - modeldimensions.ModelDimensions(None, self.simulation_data) - ] - return modeldimensions.PackageDimensions( - model_dims, self.structure, self.path - ) - - def _store_comment(self, line, found_first_block): - # Store comment - if found_first_block: - self.post_block_comments.text += line - else: - self.simulation_data.mfdata[ - self.path + ("pkg_hdr_comments",) - ].text += line - - def _write_blocks(self, fd, ext_file_action): - # verify that all blocks are valid - if not self.is_valid(): - message = ( - 'Unable to write out model file "{}" due to the ' - "following error: " - "{} ({})".format(self._filename, self.last_error, self.path) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "writing package blocks", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - # write initial comments - pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) - if pkg_hdr_comments_path in self.simulation_data.mfdata: - self.simulation_data.mfdata[ - self.path + ("pkg_hdr_comments",) - ].write(fd, False) - - # loop through blocks - block_num = 1 - for block in self.blocks.values(): - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" writing block {block.structure.name}...") - # write block - block.write(fd, ext_file_action=ext_file_action) - block_num += 1 - - def get_file_path(self): - 
"""Returns the package file's path. - - Returns - ------- - file path : str - """ - if self.path[0] in self.simulation_data.mfpath.model_relative_path: - return os.path.join( - self.simulation_data.mfpath.get_model_path(self.path[0]), - self._filename, - ) - else: - return os.path.join( - self.simulation_data.mfpath.get_sim_path(), self._filename - ) - - def export(self, f, **kwargs): - """ - Method to export a package to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - Filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - User supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from .. import export - - return export.utils.package_export(f, self, **kwargs) - - def plot(self, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate - file names for output image files. Plots will be exported as - image files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only - used if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
- - """ - from ..plot.plotutil import PlotUtilities - - if not self.plottable: - raise TypeError("Simulation level packages are not plottable") - - axes = PlotUtilities._plot_package_helper(self, **kwargs) - return axes - - @staticmethod - def _add_netcdf_entries(attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1): - if auxiliary: - auxnames = auxiliary - else: - auxnames = [] - - def add_entry(tagname, iaux=None, layer=None): - key = tagname - name = f"{pname}" - if iaux is not None: - key = f"{key}/{iaux}" - name = f"{name}_{auxiliary[iaux]}" - else: - name = f"{name}_{tagname}" - if layer is not None: - key = f"{key}/layer{layer}" - name = f"{name}_l{layer}" - - a = {} - a["varname"] = name.lower() - a["attrs"] = {} - a["attrs"]["modflow_input"] = ( - f"{mname}/{pname}/{tagname}" - ).upper() - if iaux is not None: - a["attrs"]["modflow_iaux"] = iaux + 1 - if layer is not None: - a["attrs"]["layer"] = layer - attrs[key] = a - - if data_item.layered and mesh == "LAYERED": - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate(auxnames): - for l in range(nlay): - add_entry(data_item.name, n, l + 1) - else: - for l in range(nlay): - add_entry(data_item.name, layer=l + 1) - else: - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate(auxnames): - add_entry(data_item.name, iaux=n) - else: - add_entry(data_item.name) - - @staticmethod - def netcdf_attrs(mtype, ptype, auxiliary=None, mesh=None, nlay=1): - from .data.mfstructure import DfnPackage, MFSimulationStructure - - attrs = {} - sim_struct = MFSimulationStructure() - - for package in MFPackage.__subclasses__(): - sim_struct.process_dfn(DfnPackage(package)) - p = DfnPackage(package) - c, sc = p.dfn_file_name.split('.')[0].split('-') - if c == mtype.lower() and sc == ptype.lower(): - sim_struct.add_package(p, model_file=False) - exit - - if ptype.lower() in sim_struct.package_struct_objs: - pso = sim_struct.package_struct_objs[ptype.lower()] - for key, block in pso.blocks.items(): - if key != "griddata" and key != "period": - continue - for d in block.data_structures: - if (block.data_structures[d].netcdf): - MFPackage._add_netcdf_entries( - attrs, - mtype, - ptype, - block.data_structures[d], - auxiliary, - mesh, - nlay, - ) - - res_d = {} - for k in list(attrs): - res_d[k] = attrs[k]['attrs'] - - return res_d - - def netcdf_info(self, mesh=None): - attrs = {} - - if self.dimensions.get_aux_variables(): - auxnames = list(self.dimensions.get_aux_variables()[0]) - if len(auxnames) and auxnames[0] == "auxiliary": - auxnames.pop(0) - else: - auxnames = [] - - for key, block in self.blocks.items(): - if key != "griddata" and key != "period": - continue - for dataset in block.datasets.values(): - if isinstance(dataset, mfdataarray.MFArray): - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if ( - dataset.structure.netcdf and - dataset.has_data() - ): - MFPackage._add_netcdf_entries( - attrs, - self.model_name, - self.package_name, - dataset.structure, - auxnames, - mesh, - self.model_or_sim.modelgrid.nlay, - ) - - return attrs - - -class MFChildPackages: - """ - Behind the scenes code for creating an interface to access child packages - from a parent package. This class is automatically constructed by the - FloPy library and is for internal library use only. 
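The `netcdf_info`/`netcdf_attrs` pair above is what the netcdf tutorials at the top of this patch consume when they assemble the xarray dataset. A minimal sketch of both entry points, reusing the `gwf` model from the earlier sketch (the mesh value and the gwf/npf combination are illustrative assumptions):

    # per-instance: variable names and attributes for one loaded package
    dis_info = gwf.get_package("dis").netcdf_info()
    for key, entry in dis_info.items():
        print(entry["varname"], entry["attrs"]["modflow_input"])

    # static: attributes straight from the dfn structures, no model needed
    attrs = flopy.mf6.mfpackage.MFPackage.netcdf_attrs(
        "gwf", "npf", mesh="LAYERED", nlay=100
    )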
- - Parameters - ---------- - """ - - def __init__( - self, - model_or_sim, - parent, - pkg_type, - filerecord, - package=None, - package_class=None, - ): - self._packages = [] - self._filerecord = filerecord - if package is not None: - self._packages.append(package) - self._model_or_sim = model_or_sim - self._cpparent = parent - self._pkg_type = pkg_type - self._package_class = package_class - - def __init_subclass__(cls): - """Register package""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __getattr__(self, attr): - if ( - "_packages" in self.__dict__ - and len(self._packages) > 0 - and hasattr(self._packages[0], attr) - ): - item = getattr(self._packages[0], attr) - return item - raise AttributeError(attr) - - def __getitem__(self, k): - if isinstance(k, int): - if k < len(self._packages): - return self._packages[k] - raise ValueError(f"Package index {k} does not exist.") - - def __setattr__(self, key, value): - if ( - key != "_packages" - and key != "_model_or_sim" - and key != "_cpparent" - and key != "_inattr" - and key != "_filerecord" - and key != "_package_class" - and key != "_pkg_type" - ): - if len(self._packages) == 0: - raise Exception( - "No {} package is currently attached to package" - " {}. Use the initialize method to create a(n) " - "{} package before attempting to access its " - "properties.".format( - self._pkg_type, self._cpparent.filename, self._pkg_type - ) - ) - package = self._packages[0] - setattr(package, key, value) - return - super().__setattr__(key, value) - - def __default_file_path_base(self, file_path, suffix=""): - stem = os.path.split(file_path)[1] - stem_lst = stem.split(".") - file_name = ".".join(stem_lst[:-1]) - if len(stem_lst) > 1: - file_ext = stem_lst[-1] - return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" - elif suffix != "": - return f"{stem}.{self._pkg_type}" - else: - return f"{stem}.{suffix}.{self._pkg_type}" - - def __file_path_taken(self, possible_path): - for package in self._packages: - # Do case insensitive compare - if package.filename.lower() == possible_path.lower(): - return True - return False - - def next_default_file_path(self): - possible_path = self.__default_file_path_base(self._cpparent.filename) - suffix = 0 - while self.__file_path_taken(possible_path): - possible_path = self.__default_file_path_base( - self._cpparent.filename, suffix - ) - suffix += 1 - return possible_path - - def init_package(self, package, fname, remove_packages=True): - if remove_packages: - # clear out existing packages - self._remove_packages() - elif fname is not None: - self._remove_packages(fname) - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - # check file record variable - found = False - fr_data = self._filerecord.get_data() - if fr_data is not None: - for line in fr_data: - if line[0] == fname: - found = True - if not found: - # append file record variable - self._filerecord.append_data([(fname,)]) - # add the package to the list - self._packages.append(package) - - def _update_filename(self, old_fname, new_fname): - file_record = self._filerecord.get_data() - new_file_record_data = [] - if file_record is not None: - file_record_data = file_record[0] - for item in file_record_data: - base, fname = os.path.split(item) - if fname.lower() == old_fname.lower(): - if base: - new_file_record_data.append( - (os.path.join(base, new_fname),) - ) - else: - new_file_record_data.append((new_fname,)) - else: - 
new_file_record_data.append((item,)) - else: - new_file_record_data.append((new_fname,)) - self._filerecord.set_data(new_file_record_data) - - def _append_package(self, package, fname, update_frecord=True): - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - - if update_frecord: - # set file record variable - file_record = self._filerecord.get_data() - file_record_data = file_record - new_file_record_data = [] - for item in file_record_data: - new_file_record_data.append((item[0],)) - new_file_record_data.append((fname,)) - self._filerecord.set_data(new_file_record_data) - - for existing_pkg in self._packages: - if existing_pkg is package: - # do not add the same package twice - return - # add the package to the list - self._packages.append(package) - - def _remove_packages(self, fname=None, only_pop_from_list=False): - rp_list = [] - for idx, package in enumerate(self._packages): - if fname is None or package.filename == fname: - if not only_pop_from_list: - self._model_or_sim.remove_package(package) - rp_list.append(idx) - for idx in reversed(rp_list): - self._packages.pop(idx) diff --git a/flopy/mf6/tmp/ruff/mfmodel.py b/flopy/mf6/tmp/ruff/mfmodel.py deleted file mode 100644 index e27308cb02..0000000000 --- a/flopy/mf6/tmp/ruff/mfmodel.py +++ /dev/null @@ -1,2143 +0,0 @@ -import inspect -import os -import sys -import warnings -from typing import Optional, Union - -import numpy as np - -from ..discretization.grid import Grid -from ..discretization.modeltime import ModelTime -from ..discretization.structuredgrid import StructuredGrid -from ..discretization.unstructuredgrid import UnstructuredGrid -from ..discretization.vertexgrid import VertexGrid -from ..mbase import ModelInterface -from ..utils import datautil -from ..utils.check import mf6check -from .coordinates import modeldimensions -from .data import mfdata, mfdatalist, mfstructure -from .data.mfdatautil import DataSearchOutput, iterable -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .mfpackage import MFPackage -from .utils.mfenums import DiscretizationType -from .utils.output_util import MF6Output - - -class MFModel(ModelInterface): - """ - MODFLOW-6 model base class. Represents a single model in a simulation. 
- - Parameters - ---------- - simulation_data : MFSimulationData - Simulation data object of the simulation this model will belong to - structure : MFModelStructure - Structure of this type of model - modelname : str - Name of the model - model_nam_file : str - Relative path to the model name file from model working folder - version : str - Version of modflow - exe_name : str - Model executable name - model_ws : str - Model working folder path - disfile : str - Relative path to dis file from model working folder - grid_type : str - Type of grid the model will use (structured, unstructured, vertices) - verbose : bool - Verbose setting for model operations (default False) - - Attributes - ---------- - name : str - Name of the model - exe_name : str - Model executable name - packages : dict of MFPackage - Dictionary of model packages - - """ - - def __init__( - self, - simulation, - model_type="gwf6", - modelname="model", - model_nam_file=None, - version="mf6", - exe_name="mf6", - add_to_simulation=True, - structure=None, - model_rel_path=".", - verbose=False, - **kwargs, - ): - self._package_container = PackageContainer(simulation.simulation_data) - self.simulation = simulation - self.simulation_data = simulation.simulation_data - self.name = modelname - self.name_file = None - self._version = version - self.model_type = model_type - self.type = "Model" - - if model_nam_file is None: - model_nam_file = f"{modelname}.nam" - - if add_to_simulation: - self.structure = simulation.register_model( - self, model_type, modelname, model_nam_file - ) - else: - self.structure = structure - self.set_model_relative_path(model_rel_path) - self.exe_name = exe_name - self.dimensions = modeldimensions.ModelDimensions( - self.name, self.simulation_data - ) - self.simulation_data.model_dimensions[modelname] = self.dimensions - self._ftype_num_dict = {} - self._package_paths = {} - self._verbose = verbose - - if model_nam_file is None: - self.model_nam_file = f"{modelname}.nam" - else: - self.model_nam_file = model_nam_file - - # check for spatial reference info in kwargs - xll = kwargs.pop("xll", None) - yll = kwargs.pop("yll", None) - self._xul = kwargs.pop("xul", None) - self._yul = kwargs.pop("yul", None) - rotation = kwargs.pop("rotation", 0.0) - crs = kwargs.pop("crs", None) - # build model grid object - self._modelgrid = Grid(crs=crs, xoff=xll, yoff=yll, angrot=rotation) - - self.start_datetime = None - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = f'Extraneous kwargs "{kwargs_str}" provided to MFModel.' - raise FlopyException(excpt_str) - - # build model name file - # create name file based on model type - support different model types - package_obj = PackageContainer.package_factory("nam", model_type[0:3]) - if not package_obj: - excpt_str = f"Name file could not be found for model{model_type[0:3]}." 
- raise FlopyException(excpt_str) - - self.name_file = package_obj( - self, - filename=self.model_nam_file, - pname=self.name, - _internal_package=True, - ) - - def __init_subclass__(cls): - """Register model type""" - super().__init_subclass__() - PackageContainer.modflow_models.append(cls) - PackageContainer.models_by_type[cls.model_type] = cls - - def __getattr__(self, item): - """ - __getattr__ - used to allow for getting packages as if they are - attributes - - Parameters - ---------- - item : str - 3 character package name (case insensitive) - - - Returns - ------- - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - """ - if item == "name_file" or not hasattr(self, "name_file"): - raise AttributeError(item) - - package = self.get_package(item) - if package is not None: - return package - raise AttributeError(item) - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.name, - package="", - ) - return - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - file_mgr = self.simulation_data.mfpath - data_str = ( - "name = {}\nmodel_type = {}\nversion = {}\nmodel_" - "relative_path = {}" - "\n\n".format( - self.name, - self.model_type, - self.version, - file_mgr.model_relative_path[self.name], - ) - ) - - for package in self.packagelist: - pk_str = package._get_data_str(formal, False) - if formal: - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - else: - pk_str = package._get_data_str(formal, False) - if len(pk_str.strip()) > 0: - data_str = ( - "{}###################\nPackage {}\n" - "###################\n\n" - "{}\n".format(data_str, package._get_pname(), pk_str) - ) - return data_str - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_dict(self): - """Returns a copy of the package name dictionary. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. 
- """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - @property - def nper(self): - """Number of stress periods. - - Returns - ------- - nper : int - Number of stress periods in the simulation. - - """ - try: - return self.simulation.tdis.nper.array - except AttributeError: - return None - - @property - def modeltime(self): - """Model time discretization information. - - Returns - ------- - modeltime : ModelTime - FloPy object containing time discretization information for the - simulation. - - """ - tdis = self.simulation.get_package("tdis", type_only=True) - period_data = tdis.perioddata.get_data() - - # build steady state data - sto = self.get_package("sto", type_only=True) - if sto is None: - steady = np.full((len(period_data["perlen"])), True, dtype=bool) - else: - steady = np.full((len(period_data["perlen"])), False, dtype=bool) - ss_periods = sto.steady_state.get_active_key_dict() - for period, val in ss_periods.items(): - if val: - ss_periods[period] = sto.steady_state.get_data(period) - tr_periods = sto.transient.get_active_key_dict() - for period, val in tr_periods.items(): - if val: - tr_periods[period] = sto.transient.get_data(period) - if ss_periods: - last_ss_value = False - # loop through steady state array - for index, value in enumerate(steady): - # resolve if current index is steady state or transient - if index in ss_periods and ss_periods[index]: - last_ss_value = True - elif index in tr_periods and tr_periods[index]: - last_ss_value = False - if last_ss_value is True: - steady[index] = True - - # build model time - itmuni = tdis.time_units.get_data() - start_date_time = tdis.start_date_time.get_data() - - self._model_time = ModelTime( - perlen=period_data["perlen"], - nstp=period_data["nstp"], - tsmult=period_data["tsmult"], - time_units=itmuni, - start_datetime=start_date_time, - steady_state=steady, - ) - return self._model_time - - @property - def modeldiscrit(self): - """Basic model spatial discretization information. This is used - internally prior to model spatial discretization information being - fully loaded. - - Returns - ------- - model grid : Grid subclass - FloPy object containing basic spatial discretization information - for the model. 
- - """ - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - return StructuredGrid( - nlay=dis.nlay.get_data(), - nrow=dis.nrow.get_data(), - ncol=dis.ncol.get_data(), - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - return VertexGrid(ncpl=dis.ncpl.get_data(), nlay=dis.nlay.get_data()) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - nodes = dis.nodes.get_data() - ncpl = np.array([nodes], dtype=int) - return UnstructuredGrid(ncpl=ncpl) - - @property - def modelgrid(self): - """Model spatial discretization information. - - Returns - ------- - model grid : Grid subclass - FloPy object containing spatial discretization information for the - model. - - """ - force_resync = False - if not self._mg_resync: - return self._modelgrid - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package("dis") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package("disv") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.botm.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=dis.top.array, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package("disu") - if not hasattr(dis, "_init_complete"): - # disu package has not yet been fully initialized - return self._modelgrid - - # check to see if ncpl can be constructed from ihc array, - # otherwise set ncpl equal to [nodes] - ihc = dis.ihc.array - iac = dis.iac.array - ncpl = UnstructuredGrid.ncpl_from_ihc(ihc, iac) - if ncpl is None: - ncpl = np.array([dis.nodes.get_data()], dtype=int) - cell2d = dis.cell2d.array - idomain = dis.idomain.array - if idomain is None: - idomain = np.ones(dis.nodes.array, dtype=int) - if cell2d is None: - if ( - 
self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: cell2d information missing. Functionality of " - "the UnstructuredGrid will be limited." - ) - - vertices = dis.vertices.array - if vertices is None: - if ( - self.simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: vertices information missing. Functionality " - "of the UnstructuredGrid will be limited." - ) - vertices = None - else: - vertices = np.array(vertices) - - self._modelgrid = UnstructuredGrid( - vertices=vertices, - cell2d=cell2d, - top=dis.top.array, - botm=dis.bot.array, - idomain=idomain, - lenuni=dis.length_units.array, - ncpl=ncpl, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - iac=dis.iac.array, - ja=dis.ja.array, - ) - elif self.get_grid_type() == DiscretizationType.DISV1D: - dis = self.get_package("disv1d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell1d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DIS2D: - dis = self.get_package("dis2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "delr"): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = StructuredGrid( - delc=dis.delc.array, - delr=dis.delr.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - elif self.get_grid_type() == DiscretizationType.DISV2D: - dis = self.get_package("disv2d") - if not hasattr(dis, "_init_complete"): - if not hasattr(dis, "cell2d"): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - botm = dis.bottom.array - idomain = dis.idomain.array - if idomain 
is None: - force_resync = True - idomain = self._resolve_idomain(idomain, botm) - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=botm, - idomain=idomain, - lenuni=dis.length_units.array, - crs=self._modelgrid.crs, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - ) - else: - return self._modelgrid - - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - - # resolve offsets - if xorig is None: - xorig = self._modelgrid.xoffset - if xorig is None: - if self._xul is not None: - xorig = self._modelgrid._xul_to_xll(self._xul) - else: - xorig = 0.0 - if yorig is None: - yorig = self._modelgrid.yoffset - if yorig is None: - if self._yul is not None: - yorig = self._modelgrid._yul_to_yll(self._yul) - else: - yorig = 0.0 - if angrot is None: - angrot = self._modelgrid.angrot - self._modelgrid.set_coord_info( - xorig, - yorig, - angrot, - self._modelgrid.crs, - ) - self._mg_resync = not self._modelgrid.is_complete or force_resync - return self._modelgrid - - @property - def packagelist(self): - """List of model packages.""" - return self._package_container.packagelist - - @property - def namefile(self): - """Model namefile object.""" - return self.model_nam_file - - @property - def model_ws(self): - """Model file path.""" - file_mgr = self.simulation_data.mfpath - return file_mgr.get_model_path(self.name) - - @property - def exename(self): - """MODFLOW executable name""" - return self.exe_name - - @property - def version(self): - """Version of MODFLOW""" - return self._version - - @property - def solver_tols(self): - """Returns the solver inner hclose and rclose values. - - Returns - ------- - inner_hclose, rclose : float, float - - """ - ims = self.get_ims_package() - if ims is not None: - rclose = ims.rcloserecord.get_data() - if rclose is not None: - rclose = rclose[0][0] - return ims.inner_hclose.get_data(), rclose - return None - - @property - def laytyp(self): - """Layering type""" - try: - return self.npf.icelltype.array - except AttributeError: - return None - - @property - def hdry(self): - """Dry cell value""" - return -1e30 - - @property - def hnoflo(self): - """No-flow cell value""" - return 1e30 - - @property - def laycbd(self): - """Quasi-3D confining bed. Not supported in MODFLOW-6. - - Returns - ------- - None : None - - """ - return None - - @property - def output(self): - budgetkey = None - if self.model_type == "gwt6": - budgetkey = "MASS BUDGET FOR ENTIRE MODEL" - try: - return MF6Output(self.oc, budgetkey=budgetkey) - except AttributeError: - return MF6Output(self, budgetkey=budgetkey) - - def export(self, f, **kwargs): - """Method to export a model to a shapefile or netcdf file - - Parameters - ---------- - f : str - File name (".nc" for netcdf or ".shp" for shapefile) - or dictionary of .... 
- **kwargs : keyword arguments - modelgrid: flopy.discretization.Grid - User supplied modelgrid object which will supersede the built - in modelgrid object - if fmt is set to 'vtk', parameters of Vtk initializer - - """ - from ..export import utils - - return utils.model_export(f, self, **kwargs) - - def netcdf_attrs(self, mesh=None): - """Return dictionary of dataset (model) scoped attributes - Parameters - ---------- - mesh : str - mesh type if dataset is ugrid complient - """ - attrs = { - "modflow_grid": "", - "modflow_model": "", - } - if self.get_grid_type() == DiscretizationType.DIS: - attrs["modflow_grid"] = "STRUCTURED" - elif self.get_grid_type() == DiscretizationType.DISV: - attrs["modflow_grid"] = "VERTEX" - - attrs["modflow_model"] = ( - f"{self.name.upper()}: MODFLOW 6 {self.model_type.upper()[0:3]} model" - ) - - # supported => LAYERED - if mesh: - attrs["mesh"] = mesh - - return attrs - - @property - def verbose(self): - """Verbose setting for model operations (True/False)""" - return self._verbose - - @verbose.setter - def verbose(self, verbose): - """Verbose setting for model operations (True/False)""" - self._verbose = verbose - - def check(self, f=None, verbose=True, level=1): - """ - Check model data for common errors. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - success : bool - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.check() - """ - - # check instance for model-level check - chk = mf6check(self, f=f, verbose=verbose, level=level) - - return self._check(chk, level) - - @staticmethod - def load_base( - cls_child, - simulation, - structure, - modelname="NewModel", - model_nam_file="modflowtest.nam", - mtype="gwf", - version="mf6", - exe_name: Union[str, os.PathLike] = "mf6", - strict=True, - model_rel_path=os.curdir, - load_only=None, - ): - """ - Class method that loads an existing model. - - Parameters - ---------- - simulation : MFSimulation - simulation object that this model is a part of - simulation_data : MFSimulationData - simulation data object - structure : MFModelStructure - structure of this type of model - model_name : str - name of the model - model_nam_file : str - relative path to the model name file from model working folder - version : str - version of modflow - exe_name : str or PathLike - model executable name or path - strict : bool - strict mode when loading files - model_rel_path : str - relative path of model folder to simulation folder - load_only : list - list of package abbreviations or package names corresponding to - packages that flopy will load. default is None, which loads all - packages. the discretization packages will load regardless of this - setting. subpackages, like time series and observations, will also - load regardless of this setting. 
- example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1'] - - Returns - ------- - model : MFModel - - Examples - -------- - """ - instance = cls_child( - simulation, - modelname, - model_nam_file=model_nam_file, - version=version, - exe_name=exe_name, - add_to_simulation=False, - structure=structure, - model_rel_path=model_rel_path, - ) - - # build case consistent load_only dictionary for quick lookups - load_only = PackageContainer._load_only_dict(load_only) - - # load name file - instance.name_file.load(strict) - - # order packages - vnum = mfstructure.MFStructure().get_version_string() - # FIX: Transport - Priority packages maybe should not be hard coded - priority_packages = { - f"dis{vnum}": 1, - f"disv{vnum}": 1, - f"disu{vnum}": 1, - } - packages_ordered = [] - package_recarray = instance.simulation_data.mfdata[ - (modelname, "nam", "packages", "packages") - ] - if package_recarray.array is None: - return instance - - for item in package_recarray.get_data(): - if item[0] in priority_packages: - packages_ordered.insert(0, (item[0], item[1], item[2])) - else: - packages_ordered.append((item[0], item[1], item[2])) - - # load packages - sim_struct = mfstructure.MFStructure().sim_struct - instance._ftype_num_dict = {} - for ftype, fname, pname in packages_ordered: - ftype_orig = ftype - ftype = ftype[0:-1].lower() - if ( - ftype in structure.package_struct_objs - or ftype in sim_struct.utl_struct_objs - ): - if ( - load_only is not None - and not PackageContainer._in_pkg_list( - priority_packages, ftype_orig, pname - ) - and not PackageContainer._in_pkg_list(load_only, ftype_orig, pname) - ): - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" skipping package {ftype}...") - continue - if model_rel_path and model_rel_path != ".": - # strip off model relative path from the file path - filemgr = simulation.simulation_data.mfpath - fname = filemgr.strip_model_relative_path(modelname, fname) - if ( - simulation.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" loading package {ftype}...") - # load package - instance.load_package(ftype, fname, pname, strict, None) - sim_data = simulation.simulation_data - if ftype == "dis" and not sim_data.max_columns_user_set: - # set column wrap to ncol - dis = instance.get_package("dis", type_only=True) - if dis is not None and hasattr(dis, "ncol"): - sim_data.max_columns_of_data = dis.ncol.get_data() - sim_data.max_columns_user_set = False - sim_data.max_columns_auto_set = True - # load referenced packages - if modelname in instance.simulation_data.referenced_files: - for ref_file in instance.simulation_data.referenced_files[ - modelname - ].values(): - if ( - ref_file.file_type in structure.package_struct_objs - or ref_file.file_type in sim_struct.utl_struct_objs - ) and not ref_file.loaded: - instance.load_package( - ref_file.file_type, - ref_file.file_name, - None, - strict, - ref_file.reference_path, - ) - ref_file.loaded = True - - # TODO: fix jagged lists where appropriate - - return instance - - def inspect_cells( - self, - cell_list, - stress_period=None, - output_file_path=None, - inspect_budget=True, - inspect_dependent_var=True, - ): - """ - Inspect model cells. Returns model data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data qnly return data from this stress period. 
If - not specified or None, all stress period data will be returned. - output_file_path: str - Path to output file that will contain the inspection results - inspect_budget: bool - Inspect budget file - inspect_dependent_var: bool - Inspect head file - Returns - ------- - output : dict - Dictionary containing inspection results - - Examples - -------- - - >>> import flopy - >>> sim = flopy.mf6.MFSimulationBase.load("name", "mf6", "mf6", ".") - >>> model = sim.get_model() - >>> inspect_list = [(2, 3, 2), (0, 4, 2), (0, 2, 4)] - >>> out_file = os.path.join("temp", "inspect_AdvGW_tidal.csv") - >>> model.inspect_cells(inspect_list, output_file_path=out_file) - """ - # handle no cell case - if cell_list is None or len(cell_list) == 0: - return None - - output_by_package = {} - # loop through all packages - for pp in self.packagelist: - # call the package's "inspect_cells" method - package_output = pp.inspect_cells(cell_list, stress_period) - if len(package_output) > 0: - output_by_package[f"{pp.package_name} package"] = package_output - # get dependent variable - if inspect_dependent_var: - try: - if self.model_type == "gwf6": - heads = self.output.head() - name = "heads" - elif self.model_type == "gwt6": - heads = self.output.concentration() - name = "concentration" - else: - inspect_dependent_var = False - except Exception: - inspect_dependent_var = False - if inspect_dependent_var and heads is not None: - kstp_kper_lst = heads.get_kstpkper() - data_output = DataSearchOutput((name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if stress_period is not None and stress_period != kstp_kper[1]: - continue - head_array = np.array(heads.get_data(kstpkper=kstp_kper)) - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2: - head_array = head_array[0, :, :] - elif len(cell_list[0]) == 1: - head_array = head_array[0, 0, :] - # find data matches - self.match_array_cells( - cell_list, - head_array.shape, - head_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - output_by_package[f"{name} output"] = [data_output] - - # get model dimensions - model_shape = self.modelgrid.shape - - # get budgets - if inspect_budget: - try: - bud = self.output.budget() - except Exception: - inspect_budget = False - if inspect_budget and bud is not None: - kstp_kper_lst = bud.get_kstpkper() - rec_names = bud.get_unique_record_names() - budget_matches = [] - for rec_name in rec_names: - # clean up binary string name - string_name = str(rec_name)[3:-1].strip() - data_output = DataSearchOutput((string_name,)) - data_output.output = True - for kstp_kper in kstp_kper_lst: - if stress_period is not None and stress_period != kstp_kper[1]: - continue - budget_array = np.array( - bud.get_data( - kstpkper=kstp_kper, - text=rec_name, - full3D=True, - )[0] - ) - if len(budget_array.shape) == 4: - # get rid of 4th "time" dimension - budget_array = budget_array[0, :, :, :] - # flatten output data in disv and disu cases - if len(cell_list[0]) == 2 and len(budget_array.shape) >= 3: - budget_array = budget_array[0, :, :] - elif len(cell_list[0]) == 1 and len(budget_array.shape) >= 2: - budget_array = budget_array[0, :] - # find data matches - if budget_array.shape != model_shape: - # no support yet for different shaped budgets like - # flow_ja_face - continue - - self.match_array_cells( - cell_list, - budget_array.shape, - budget_array, - kstp_kper, - data_output, - ) - if len(data_output.data_entries) > 0: - budget_matches.append(data_output) - if len(budget_matches) > 0: 
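-                # all matching budget records are grouped under a single
-                # "budget output" key in the inspection results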
- output_by_package["budget output"] = budget_matches - - if len(output_by_package) > 0 and output_file_path is not None: - with open(output_file_path, "w") as fd: - # write document header - fd.write(f"Inspect cell results for model {self.name}\n") - output = [] - for cell in cell_list: - output.append(" ".join([str(i) for i in cell])) - output = ",".join(output) - fd.write(f"Model cells inspected,{output}\n\n") - - for package_name, matches in output_by_package.items(): - fd.write(f"Results from {package_name}\n") - for search_output in matches: - # write header line with data name - fd.write(f",Results from {search_output.path_to_data[-1]}\n") - # write data header - if search_output.transient: - if search_output.output: - fd.write(",stress_period,time_step") - else: - fd.write(",stress_period/key") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - fd.write(",cellid") - h_columns = ",".join(search_output.data_header) - fd.write(f",{h_columns}\n") - else: - fd.write(",cellid,data\n") - # write data found - for index, data_entry in enumerate(search_output.data_entries): - if search_output.transient: - sp = search_output.data_entry_stress_period[index] - if search_output.output: - fd.write(f",{sp[1]},{sp[0]}") - else: - fd.write(f",{sp}") - if search_output.data_header is not None: - if len(search_output.data_entry_cellids) > 0: - cells = search_output.data_entry_cellids[index] - output = " ".join([str(i) for i in cells]) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - else: - output = " ".join( - [ - str(i) - for i in search_output.data_entry_ids[index] - ] - ) - fd.write(f",{output}") - fd.write(self._format_data_entry(data_entry)) - fd.write("\n") - return output_by_package - - def match_array_cells(self, cell_list, data_shape, array_data, key, data_output): - # loop through list of cells we are searching for - for cell in cell_list: - if len(data_shape) == 3 or data_shape[0] == "nodes": - # data is by cell - if array_data.ndim == 3 and len(cell) == 3: - data_output.data_entries.append( - array_data[cell[0], cell[1], cell[2]] - ) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append(array_data[cell[0], cell[1]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: CellID "{}" not same ' - "number of dimensions as data " - "{}.".format(cell, data_output.path_to_data) - ) - print(warning_str) - elif len(data_shape) == 2: - # get data based on ncpl/lay - if array_data.ndim == 2 and len(cell) == 2: - data_output.data_entries.append(array_data[cell[0], cell[1]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif array_data.ndim == 1 and len(cell) == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - elif len(data_shape) == 1: - # get data based on nodes - if len(cell) == 1 and array_data.ndim == 1: - data_output.data_entries.append(array_data[cell[0]]) - data_output.data_entry_ids.append(cell) - 
data_output.data_entry_stress_period.append(key) - - @staticmethod - def _format_data_entry(data_entry): - output = "" - if iterable(data_entry, True): - for item in data_entry: - if isinstance(item, tuple): - formatted = " ".join([str(i) for i in item]) - output = f"{output},{formatted}" - else: - output = f"{output},{item}" - return f"{output}\n" - else: - return f",{data_entry}\n" - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """ - Writes out model's package files. - - Parameters - ---------- - ext_file_action : ExtFileAction - Defines what to do with external files when the simulation path has - changed. defaults to copy_relative_paths which copies only files - with relative paths, leaving files defined by absolute paths fixed. - - """ - - # write name file - if self.simulation_data.verbosity_level.value >= VerbosityLevel.normal.value: - print(" writing model name file...") - - self.name_file.write(ext_file_action=ext_file_action) - - if not self.simulation_data.max_columns_user_set: - grid_type = self.get_grid_type() - if grid_type == DiscretizationType.DIS: - self.simulation_data.max_columns_of_data = self.dis.ncol.get_data() - self.simulation_data.max_columns_user_set = False - self.simulation_data.max_columns_auto_set = True - - # write packages - for pp in self.packagelist: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print(f" writing package {pp._get_pname()}...") - pp.write(ext_file_action=ext_file_action) - - def get_grid_type(self): - """ - Return the type of grid used by model 'model_name' in simulation - containing simulation data 'simulation_data'. - - Returns - ------- - grid type : DiscretizationType - """ - package_recarray = self.name_file.packages - structure = mfstructure.MFStructure() - if ( - package_recarray.search_data(f"dis{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DIS - elif ( - package_recarray.search_data(f"disv{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DISV - elif ( - package_recarray.search_data(f"disu{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DISU - elif ( - package_recarray.search_data(f"disv1d{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DISV1D - elif ( - package_recarray.search_data(f"dis2d{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DIS2D - elif ( - package_recarray.search_data(f"disv2d{structure.get_version_string()}", 0) - is not None - ): - return DiscretizationType.DISV2D - - return DiscretizationType.UNDEFINED - - def get_ims_package(self): - """Get the IMS package associated with this model. - - Returns - ------- - IMS package : ModflowIms - """ - solution_group = self.simulation.name_file.solutiongroup.get_data(0) - for record in solution_group: - for name in record.dtype.names: - if name == "slntype" or name == "slnfname": - continue - if record[name] == self.name: - return self.simulation.get_solution_package(record.slnfname) - return None - - def get_steadystate_list(self): - """Returns a list of stress periods that are steady state. 
-
-        Returns
-        -------
-        steady state list : list
-
-        """
-        ss_list = []
-        tdis = self.simulation.get_package("tdis")
-        period_data = tdis.perioddata.get_data()
-        index = 0
-        pd_len = len(period_data)
-        while index < pd_len:
-            ss_list.append(True)
-            index += 1
-
-        storage = self.get_package("sto", type_only=True)
-        if storage is not None:
-            tr_keys = storage.transient.get_keys(True)
-            ss_keys = storage.steady_state.get_keys(True)
-            for key in tr_keys:
-                ss_list[key] = False
-                # periods after a transient entry remain transient until
-                # the next steady-state entry is reached
-                for ss_list_key in range(key + 1, len(ss_list)):
-                    if ss_list_key in ss_keys:
-                        break
-                    ss_list[ss_list_key] = False
-        return ss_list
-
-    def is_valid(self):
-        """
-        Checks the validity of the model and all of its packages
-
-        Returns
-        -------
-        valid : bool
-
-        """
-
-        # valid name file
-        if not self.name_file.is_valid():
-            return False
-
-        # valid packages
-        for pp in self.packagelist:
-            if not pp.is_valid():
-                return False
-
-        # required packages exist
-        for package_struct in self.structure.package_struct_objs.values():
-            if (
-                not package_struct.optional
-                and package_struct.file_type
-                not in self._package_container.package_type_dict
-            ):
-                return False
-
-        return True
-
-    def set_model_relative_path(self, model_ws):
-        """
-        Sets the file path to the model folder relative to the simulation
-        folder and updates all model file paths, placing them in the model
-        folder.
-
-        Parameters
-        ----------
-        model_ws : str
-            Model working folder relative to simulation working folder
-
-        """
-        # set all data internal
-        self.set_all_data_internal(False)
-
-        # update path in the file manager
-        file_mgr = self.simulation_data.mfpath
-        file_mgr.set_last_accessed_model_path()
-        path = model_ws
-        file_mgr.model_relative_path[self.name] = path
-
-        if model_ws and model_ws != "."
and self.simulation.name_file is not None: - model_folder_path = file_mgr.get_model_path(self.name) - if not os.path.exists(model_folder_path): - # make new model folder - os.makedirs(model_folder_path) - # update model name file location in simulation name file - models = self.simulation.name_file.models - models_data = models.get_data() - for index, entry in enumerate(models_data): - old_model_file_name = os.path.split(entry[1])[1] - old_model_base_name = os.path.splitext(old_model_file_name)[0] - if ( - old_model_base_name.lower() == self.name.lower() - or self.name == entry[2] - ): - models_data[index][1] = os.path.join(path, old_model_file_name) - break - models.set_data(models_data) - - if self.name_file is not None: - # update listing file location in model name file - list_file = self.name_file.list.get_data() - if list_file: - path, list_file_name = os.path.split(list_file) - try: - self.name_file.list.set_data(os.path.join(path, list_file_name)) - except MFDataException as mfde: - message = ( - "Error occurred while setting relative " - 'path "{}" in model ' - '"{}".'.format( - os.path.join(path, list_file_name), self.name - ) - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - # update package file locations in model name file - packages = self.name_file.packages - packages_data = packages.get_data() - if packages_data is not None: - for index, entry in enumerate(packages_data): - # get package object associated with entry - package = None - if len(entry) >= 3: - package = self.get_package(entry[2]) - if package is None: - package = self.get_package(entry[0]) - if package is not None: - # combine model relative path with package path - packages_data[index][1] = os.path.join( - path, package.filename - ) - else: - # package not found, create path based on - # information in name file - old_package_name = os.path.split(entry[1])[-1] - packages_data[index][1] = os.path.join( - path, old_package_name - ) - packages.set_data(packages_data) - # update files referenced from within packages - for package in self.packagelist: - package.set_model_relative_path(model_ws) - - def _remove_package_from_dictionaries(self, package): - # remove package from local dictionaries and lists - if package.path in self._package_paths: - del self._package_paths[package.path] - self._package_container.remove_package(package) - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. - type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def remove_package(self, package_name): - """ - Removes package and all child packages from the model. - `package_name` can be the package's name, type, or package object to - be removed from the model. - - Parameters - ---------- - package_name : str - Package name, package type, or package object to be removed from - the model. 
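-
-        Examples
-        --------
-        A hypothetical sketch; the package type shown is illustrative:
-
-        >>> model.remove_package("riv")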
- - """ - if isinstance(package_name, MFPackage): - packages = [package_name] - else: - packages = self.get_package(package_name) - if not isinstance(packages, list) and packages is not None: - packages = [packages] - if packages is None: - return - for package in packages: - if package.model_or_sim.name != self.name: - except_text = ( - "Package can not be removed from model " - "{self.model_name} since it is not part of it." - ) - raise mfstructure.FlopyException(except_text) - - self._remove_package_from_dictionaries(package) - - try: - # remove package from name file - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while reading package names " - "from name file in model " - f'"{self.name}"' - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - try: - new_rec_array = None - for item in package_data: - filename = os.path.basename(item[1]) - if filename != package.filename: - if new_rec_array is None: - new_rec_array = np.rec.array( - [item.tolist()], package_data.dtype - ) - else: - new_rec_array = np.hstack((item, new_rec_array)) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - "building package recarray", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - None, - self.simulation_data.debug, - ) - try: - self.name_file.packages.set_data(new_rec_array) - except MFDataException as mfde: - message = ( - "Error occurred while setting package names " - f'from name file in model "{self.name}". Package name ' - f"data:\n{new_rec_array}" - ) - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message, - ) - - # build list of child packages - child_package_list = [] - for pkg in self.packagelist: - if pkg.parent_file is not None and pkg.parent_file.path == package.path: - child_package_list.append(pkg) - # remove child packages - for child_package in child_package_list: - self._remove_package_from_dictionaries(child_package) - - def update_package_filename(self, package, new_name): - """ - Updates the filename for a package. For internal flopy use only. - - Parameters - ---------- - package : MFPackage - Package object - new_name : str - New package name - """ - try: - # get namefile package data - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = ( - "Error occurred while updating package names " - "from name file in model " - f'"{self.name}".' 
-            )
-            raise MFDataException(
-                mfdata_except=mfde,
-                model=self.model_name,
-                package=self.name_file._get_pname(),
-                message=message,
-            )
-        try:
-            file_mgr = self.simulation_data.mfpath
-            model_rel_path = file_mgr.model_relative_path[self.name]
-            # update namefile package data with new name
-            new_rec_array = None
-            old_leaf = os.path.split(package.filename)[1]
-            for item in package_data:
-                leaf = os.path.split(item[1])[1]
-                if leaf == old_leaf:
-                    item[1] = os.path.join(model_rel_path, new_name)
-
-                if new_rec_array is None:
-                    new_rec_array = np.rec.array([item.tolist()], package_data.dtype)
-                else:
-                    new_rec_array = np.hstack((item, new_rec_array))
-        except:
-            type_, value_, traceback_ = sys.exc_info()
-            raise MFDataException(
-                self.structure.get_model(),
-                self.structure.get_package(),
-                self._path,
-                "updating package filename",
-                self.structure.name,
-                inspect.stack()[0][3],
-                type_,
-                value_,
-                traceback_,
-                None,
-                self.simulation_data.debug,
-            )
-        try:
-            self.name_file.packages.set_data(new_rec_array)
-        except MFDataException as mfde:
-            message = (
-                "Error occurred while updating package names "
-                f'from name file in model "{self.name}". Package name '
-                f"data:\n{new_rec_array}"
-            )
-            raise MFDataException(
-                mfdata_except=mfde,
-                model=self.model_name,
-                package=self.name_file._get_pname(),
-                message=message,
-            )
-
-    def rename_all_packages(self, name):
-        """Renames all package files in the model.
-
-        Parameters
-        ----------
-        name : str
-            Prefix of package names. Package files will be named
-            <name>.<package ftype>.
-
-        """
-        nam_filename = f"{name}.nam"
-        self.simulation.rename_model_namefile(self, nam_filename)
-        self.name_file.filename = nam_filename
-        self.model_nam_file = nam_filename
-        package_type_count = {}
-        for package in self.packagelist:
-            if package.package_type not in package_type_count:
-                base_filename, leaf = os.path.split(package.filename)
-                lleaf = leaf.split(".")
-                if len(lleaf) > 1:
-                    # keep existing extension
-                    ext = lleaf[-1]
-                else:
-                    # no extension found, create a new one
-                    ext = package.package_type
-                new_fileleaf = f"{name}.{ext}"
-                if base_filename != "":
-                    package.filename = os.path.join(base_filename, new_fileleaf)
-                else:
-                    package.filename = new_fileleaf
-                package_type_count[package.package_type] = 1
-            else:
-                package_type_count[package.package_type] += 1
-                package.filename = "{}_{}.{}".format(
-                    name,
-                    package_type_count[package.package_type],
-                    package.package_type,
-                )
-
-    def set_all_data_external(
-        self,
-        check_data=True,
-        external_data_folder=None,
-        base_name=None,
-        binary=False,
-    ):
-        """Sets the model's list and array data to be stored externally.
-
-        Warning
-        -------
-        The MF6 check mechanism is deprecated pending reimplementation
-        in a future release. While the checks API will remain in place
-        through 3.x, it may be unstable, and will likely change in 4.x.
-
-        Parameters
-        ----------
-        check_data : bool
-            Determines if data error checking is enabled during this
-            process.
-        external_data_folder
-            Folder, relative to the simulation path or model relative path
-            (see use_model_relative_path parameter), where external data
-            will be stored
-        base_name : str
-            Base file name prefix for all files
-        binary : bool
-            Whether file will be stored as binary
-
-        """
-        for package in self.packagelist:
-            package.set_all_data_external(
-                check_data,
-                external_data_folder,
-                base_name,
-                binary,
-            )
-
-    def set_all_data_internal(self, check_data=True):
-        """Sets the model's list and array data to be stored internally.
- - Parameters - ---------- - check_data : bool - Determines if data error checking is enabled during this - process. - - """ - for package in self.packagelist: - package.set_all_data_internal(check_data) - - def register_package( - self, - package, - add_to_package_list=True, - set_package_name=True, - set_package_filename=True, - ): - """ - Registers a package with the model. This method is used internally - by FloPy and is not intended for use by the end user. - - Parameters - ---------- - package : MFPackage - Package to register - add_to_package_list : bool - Add package to lookup list - set_package_name : bool - Produce a package name for this package - set_package_filename : bool - Produce a filename for this package - - Returns - ------- - path, package structure : tuple, MFPackageStructure - - """ - package.container_type = [PackageContainerType.model] - if package.parent_file is not None: - path = package.parent_file.path + (package.package_type,) - else: - path = (self.name, package.package_type) - package_struct = self.structure.get_package_struct(package.package_type) - if add_to_package_list and path in self._package_paths: - if ( - package_struct is not None - and not package_struct.multi_package_support - and not isinstance(package.parent_file, MFPackage) - ): - # package of this type already exists, replace it - self.remove_package(package.package_type) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with type {} already exists. " - "Replacing existing package" - ".".format(package.package_type) - ) - elif ( - not set_package_name - and package.package_name in self._package_container.package_name_dict - ): - # package of this type with this name already - # exists, replace it - self.remove_package( - self._package_container.package_name_dict[package.package_name] - ) - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package with name {} already exists. " - "Replacing existing package" - ".".format(package.package_name) - ) - - # make sure path is unique - if path in self._package_paths: - path_iter = datautil.PathIter(path) - for new_path in path_iter: - if new_path not in self._package_paths: - path = new_path - break - self._package_paths[path] = 1 - - if package.package_type.lower() == "nam": - if not package.internal_package: - excpt_str = ( - "Unable to register nam file. Do not create your own nam " - "files. Nam files are automatically created and managed " - "for you by FloPy." 
- ) - print(excpt_str) - raise FlopyException(excpt_str) - - return path, self.structure.name_file_struct_obj - - package_extension = package.package_type - if set_package_name: - # produce a default package name - if package_struct is not None and package_struct.multi_package_support: - # check for other registered packages of this type - name_iter = datautil.NameIter(package.package_type, False) - for package_name in name_iter: - if package_name not in self._package_container.package_name_dict: - package.package_name = package_name - suffix = package_name.split("_") - if ( - len(suffix) > 1 - and datautil.DatumUtil.is_int(suffix[-1]) - and suffix[-1] != "0" - ): - # update file extension to make unique - package_extension = f"{package_extension}_{suffix[-1]}" - break - else: - package.package_name = package.package_type - - if set_package_filename: - # filename uses model base name - package._filename = f"{self.name}.{package.package_type}" - if package._filename in self._package_container.package_filename_dict: - # auto generate a unique file name and register it - file_name = MFFileMgmt.unique_file_name( - package._filename, - self._package_container.package_filename_dict, - ) - package._filename = file_name - - if add_to_package_list: - self._package_container.add_package(package) - - # add obs file to name file if it does not have a parent - if package.package_type in self.structure.package_struct_objs or ( - package.package_type == "obs" and package.parent_file is None - ): - # update model name file - pkg_type = package.package_type.upper() - if ( - package.package_type != "obs" - and self.structure.package_struct_objs[ - package.package_type - ].read_as_arrays - ): - pkg_type = pkg_type[0:-1] - # Model Assumption - assuming all name files have a package - # recarray - file_mgr = self.simulation_data.mfpath - model_rel_path = file_mgr.model_relative_path[self.name] - if model_rel_path != ".": - package_rel_path = os.path.join(model_rel_path, package.filename) - else: - package_rel_path = package.filename - self.name_file.packages.update_record( - [ - f"{pkg_type}6", - package_rel_path, - package.package_name, - ], - 0, - ) - if package_struct is not None: - return (path, package_struct) - else: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Unable to register unsupported file type {} " - "for model {}.".format(package.package_type, self.name) - ) - return None, None - - def load_package( - self, - ftype, - fname, - pname, - strict, - ref_path, - dict_package_name=None, - parent_package: Optional[MFPackage] = None, - ): - """ - Loads a package from a file. This method is used internally by FloPy - and is not intended for the end user. - - Parameters - ---------- - ftype : str - the file type - fname : str - the name of the file containing the package input - pname : str - the user-defined name for the package - strict : bool - strict mode when loading the file - ref_path : str - path to the file. 
uses local path if set to None - dict_package_name : str - package name for dictionary lookup - parent_package : MFPackage - parent package - - Examples - -------- - """ - if ref_path is not None: - fname = os.path.join(ref_path, fname) - sim_struct = mfstructure.MFStructure().sim_struct - if ( - ftype in self.structure.package_struct_objs - and self.structure.package_struct_objs[ftype].multi_package_support - ) or ( - ftype in sim_struct.utl_struct_objs - and sim_struct.utl_struct_objs[ftype].multi_package_support - ): - # resolve dictionary name for package - if dict_package_name is not None: - if parent_package is not None: - dict_package_name = f"{parent_package.path[-1]}_{ftype}" - else: - # use dict_package_name as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[dict_package_name] += 1 - else: - self._ftype_num_dict[dict_package_name] = 0 - dict_package_name = "{}_{}".format( - dict_package_name, - self._ftype_num_dict[dict_package_name], - ) - else: - # use ftype as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[ftype] += 1 - else: - self._ftype_num_dict[ftype] = 1 - if pname is not None: - dict_package_name = pname - else: - dict_package_name = f"{ftype}-{self._ftype_num_dict[ftype]}" - else: - dict_package_name = ftype - - # clean up model type text - model_type = self.structure.model_type - while datautil.DatumUtil.is_int(model_type[-1]): - model_type = model_type[0:-1] - - # create package - package_obj = PackageContainer.package_factory(ftype, model_type) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - try: - package.load(strict) - except ReadAsArraysException: - # create ReadAsArrays package and load it instead - package_obj = PackageContainer.package_factory(f"{ftype}a", model_type) - package = package_obj( - self, - filename=fname, - pname=dict_package_name, - loading_package=True, - parent_file=parent_package, - _internal_package=True, - ) - package.load(strict) - - # register child package with the model - self._package_container.add_package(package) - if parent_package is not None: - # register child package with the parent package - parent_package.add_package(package) - - return package - - def plot(self, SelPackList=None, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Args: - model: Flopy model instance - SelPackList: (list) list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns: - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
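-
-        Examples:
-            A minimal sketch; the package list and layer index are
-            illustrative:
-
-            >>> axes = model.plot(SelPackList=["dis", "npf"], mflay=0)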
- """ - from ..plot.plotutil import PlotUtilities - - axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList, **kwargs) - - return axes - - @staticmethod - def _resolve_idomain(idomain, botm): - if idomain is None: - if botm is None: - return idomain - else: - return np.ones_like(botm) - return idomain diff --git a/flopy/mf6/tmp/ruff/mfpackage.py b/flopy/mf6/tmp/ruff/mfpackage.py deleted file mode 100644 index 6340552af9..0000000000 --- a/flopy/mf6/tmp/ruff/mfpackage.py +++ /dev/null @@ -1,3497 +0,0 @@ -import copy -import datetime -import errno -import inspect -import os -import sys -import warnings - -import numpy as np - -from ..mbase import ModelInterface -from ..pakbase import PackageInterface -from ..utils import datautil -from ..utils.check import mf6check -from ..version import __version__ -from .coordinates import modeldimensions -from .data import ( - mfdata, - mfdataarray, - mfdatalist, - mfdataplist, - mfdatascalar, - mfstructure, -) -from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal -from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure -from .mfbase import ( - ExtFileAction, - FlopyException, - MFDataException, - MFFileMgmt, - MFInvalidTransientBlockHeaderException, - PackageContainer, - PackageContainerType, - ReadAsArraysException, - VerbosityLevel, -) -from .utils.output_util import MF6Output - - -class MFBlockHeader: - """ - Represents the header of a block in a MF6 input file. This class is used - internally by FloPy and its direct use by a user of this library is not - recommend. - - Parameters - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - - Attributes - ---------- - name : str - Block name - variable_strings : list - List of strings that appear after the block name - comment : MFComment - Comment text in the block header - data_items : list - List of MFVariable of the variables contained in this block - - """ - - def __init__( - self, - name, - variable_strings, - comment, - simulation_data=None, - path=None, - block=None, - ): - self.name = name - self.variable_strings = variable_strings - self.block = block - if not ( - (simulation_data is None and path is None) - or (simulation_data is not None and path is not None) - ): - raise FlopyException( - "Block header must be initialized with both " - "simulation_data and path or with neither." 
-            )
-        if simulation_data is None:
-            self.comment = comment
-            self.simulation_data = None
-            self.path = path
-            self.comment_path = None
-        else:
-            self.connect_to_dict(simulation_data, path, comment)
-        # TODO: Get data_items from dictionary
-        self.data_items = []
-        # build block comment paths
-        self.blk_trailing_comment_path = ("blk_trailing_comment",)
-        self.blk_post_comment_path = ("blk_post_comment",)
-        if isinstance(path, list):
-            path = tuple(path)
-        if path is not None:
-            self.blk_trailing_comment_path = path + (
-                name,
-                "blk_trailing_comment",
-            )
-            self.blk_post_comment_path = path + (
-                name,
-                "blk_post_comment",
-            )
-            if self.blk_trailing_comment_path not in simulation_data.mfdata:
-                simulation_data.mfdata[self.blk_trailing_comment_path] = MFComment(
-                    "", "", simulation_data, 0
-                )
-            if self.blk_post_comment_path not in simulation_data.mfdata:
-                simulation_data.mfdata[self.blk_post_comment_path] = MFComment(
-                    "\n", "", simulation_data, 0
-                )
-        else:
-            self.blk_trailing_comment_path = ("blk_trailing_comment",)
-            self.blk_post_comment_path = ("blk_post_comment",)
-
-    def __lt__(self, other):
-        transient_key = self.get_transient_key()
-        if transient_key is None:
-            return True
-        else:
-            other_key = other.get_transient_key()
-            if other_key is None:
-                return False
-            else:
-                return transient_key < other_key
-
-    def build_header_variables(
-        self,
-        simulation_data,
-        block_header_structure,
-        block_path,
-        data,
-        dimensions,
-    ):
-        """Builds data objects to hold header variables."""
-        self.data_items = []
-        var_path = block_path + (block_header_structure[0].name,)
-
-        # fix up data
-        fixed_data = []
-        if block_header_structure[0].data_item_structures[0].type == DatumType.keyword:
-            data_item = block_header_structure[0].data_item_structures[0]
-            fixed_data.append(data_item.name)
-        if isinstance(data, tuple):
-            data = list(data)
-        if isinstance(data, list):
-            fixed_data = fixed_data + data
-        else:
-            fixed_data.append(data)
-        if len(fixed_data) > 0:
-            fixed_data = [tuple(fixed_data)]
-        # create data object
-        new_data = self.block.data_factory(
-            simulation_data,
-            None,
-            block_header_structure[0],
-            True,
-            var_path,
-            dimensions,
-            fixed_data,
-        )
-
-        self.add_data_item(new_data, data)
-
-    def add_data_item(self, new_data, data):
-        """Adds data to the block."""
-        self.data_items.append(new_data)
-        while isinstance(data, list):
-            if len(data) > 0:
-                data = data[0]
-            else:
-                data = None
-        if not isinstance(data, tuple):
-            data = (data,)
-        self.blk_trailing_comment_path += data
-        self.blk_post_comment_path += data
-
-    def is_same_header(self, block_header):
-        """Checks if `block_header` is the same header as this header."""
-        if len(self.variable_strings) > 0:
-            if len(self.variable_strings) != len(block_header.variable_strings):
-                return False
-            else:
-                for sitem, oitem in zip(
-                    self.variable_strings, block_header.variable_strings
-                ):
-                    if sitem != oitem:
-                        return False
-            return True
-        elif len(self.data_items) > 0 and len(block_header.variable_strings) > 0:
-            typ_obj = self.data_items[0].structure.data_item_structures[0].type_obj
-            if typ_obj == int or typ_obj == float:
-                return bool(
-                    self.variable_strings[0] == block_header.variable_strings[0]
-                )
-            else:
-                return True
-        elif len(self.data_items) == len(block_header.variable_strings):
-            return True
-        return False
-
-    def get_comment(self):
-        """Get block header comment"""
-        if self.simulation_data is None:
-            return self.comment
-        else:
-            return self.simulation_data.mfdata[self.comment_path]
-
-    def connect_to_dict(self, simulation_data, path, comment=None):
-        """Add comment to the simulation dictionary"""
-        self.simulation_data = simulation_data
-        self.path = path
-        self.comment_path = path + ("blk_hdr_comment",)
-        if comment is None:
-            simulation_data.mfdata[self.comment_path] = self.comment
-        else:
-            simulation_data.mfdata[self.comment_path] = comment
-        self.comment = None
-
-    def write_header(self, fd):
-        """Writes block header to file object `fd`.
-
-        Parameters
-        ----------
-        fd : file object
-            File object to write block header to.
-
-        """
-        fd.write(f"BEGIN {self.name}")
-        if len(self.data_items) > 0:
-            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
-                one_based = self.data_items[0].structure.type == DatumType.integer
-                entry = self.data_items[0].get_file_entry(
-                    values_only=True, one_based=one_based
-                )
-            else:
-                entry = self.data_items[0].get_file_entry()
-            fd.write(str(entry.rstrip()))
-            if len(self.data_items) > 1:
-                for data_item in self.data_items[1:]:
-                    entry = data_item.get_file_entry(values_only=True)
-                    fd.write(str(entry).rstrip())
-        if self.get_comment().text:
-            fd.write(" ")
-            self.get_comment().write(fd)
-        fd.write("\n")
-
-    def write_footer(self, fd):
-        """Writes block footer to file object `fd`.
-
-        Parameters
-        ----------
-        fd : file object
-            File object to write block footer to.
-
-        """
-        fd.write(f"END {self.name}")
-        if len(self.data_items) > 0:
-            one_based = self.data_items[0].structure.type == DatumType.integer
-            if isinstance(self.data_items[0], mfdatascalar.MFScalar):
-                entry = self.data_items[0].get_file_entry(
-                    values_only=True, one_based=one_based
-                )
-            else:
-                entry = self.data_items[0].get_file_entry()
-            fd.write(str(entry.rstrip()))
-        fd.write("\n")
-
-    def get_transient_key(self, data_path=None):
-        """Get transient key associated with this block header."""
-        transient_key = None
-        for index in range(0, len(self.data_items)):
-            if self.data_items[index].structure.type != DatumType.keyword:
-                if data_path == self.data_items[index].path:
-                    # avoid infinite recursion
-                    return True
-                transient_key = self.data_items[index].get_data()
-                if isinstance(transient_key, np.recarray):
-                    item_struct = self.data_items[index].structure
-                    key_index = item_struct.first_non_keyword_index()
-                    if not (
-                        key_index is not None and len(transient_key[0]) > key_index
-                    ):
-                        if key_index is None:
-                            raise FlopyException(
-                                "Block header index could not be determined."
-                            )
-                        else:
-                            raise FlopyException(
-                                'Block header index "{}" '
-                                'must be less than "{}"'
-                                ".".format(key_index, len(transient_key[0]))
-                            )
-                    transient_key = transient_key[0][key_index]
-                break
-        return transient_key
-
-
-class MFBlock:
-    """
-    Represents a block in a MF6 input file. This class is used internally
-    by FloPy and use by users of the FloPy library is not recommended.
- - Parameters - ---------- - simulation_data : MFSimulationData - Data specific to this simulation - dimensions : MFDimensions - Describes model dimensions including model grid and simulation time - structure : MFVariableStructure - Structure describing block - path : tuple - Unique path to block - - Attributes - ---------- - block_headers : MFBlockHeader - Block header text (BEGIN/END), header variables, comments in the - header - structure : MFBlockStructure - Structure describing block - path : tuple - Unique path to block - datasets : OrderDict - Dictionary of dataset objects with keys that are the name of the - dataset - datasets_keyword : dict - Dictionary of dataset objects with keys that are key words to identify - start of dataset - enabled : bool - If block is being used in the simulation - - """ - - def __init__( - self, - simulation_data, - dimensions, - structure, - path, - model_or_sim, - container_package, - ): - self._simulation_data = simulation_data - self._dimensions = dimensions - self._model_or_sim = model_or_sim - self._container_package = container_package - self.block_headers = [ - MFBlockHeader( - structure.name, - [], - MFComment("", path, simulation_data, 0), - simulation_data, - path, - self, - ) - ] - self.structure = structure - self.path = path - self.datasets = {} - self.datasets_keyword = {} - # initially disable if optional - self.enabled = structure.number_non_optional_data() > 0 - self.loaded = False - self.external_file_name = None - self._structure_init() - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - data_str = "" - for dataset in self.datasets.values(): - if formal: - ds_repr = repr(dataset) - if len(ds_repr.strip()) > 0: - data_str = f"{data_str}{dataset.structure.name}\n{dataset!r}\n" - else: - ds_str = str(dataset) - if len(ds_str.strip()) > 0: - data_str = f"{data_str}{dataset.structure.name}\n{dataset!s}\n" - return data_str - - # return an MFScalar, MFList, or MFArray - def data_factory( - self, - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - data=None, - package=None, - ): - """Creates the appropriate data child object derived from MFData.""" - data_type = structure.get_datatype() - # examine the data structure and determine the data type - if ( - data_type == mfstructure.DataType.scalar_keyword - or data_type == mfstructure.DataType.scalar - ): - return mfdatascalar.MFScalar( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - ) - elif ( - data_type == mfstructure.DataType.scalar_keyword_transient - or data_type == mfstructure.DataType.scalar_transient - ): - trans_scalar = mfdatascalar.MFScalarTransient( - sim_data, model_or_sim, structure, enable, path, dimensions - ) - if data is not None: - trans_scalar.set_data(data, key=0) - return trans_scalar - elif data_type == mfstructure.DataType.array: - return mfdataarray.MFArray( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - self, - ) - elif data_type == mfstructure.DataType.array_transient: - trans_array = mfdataarray.MFTransientArray( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - self, - ) - if data is not None: - trans_array.set_data(data, key=0) - return trans_array - elif data_type == mfstructure.DataType.list: - if ( - structure.basic_item - and self._container_package.package_type.lower() != "nam" - and self._simulation_data.use_pandas - ): - return 
mfdataplist.MFPandasList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - else: - return mfdatalist.MFList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) - elif data_type == mfstructure.DataType.list_transient: - if structure.basic_item and self._simulation_data.use_pandas: - trans_list = mfdataplist.MFPandasTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - else: - trans_list = mfdatalist.MFTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - trans_list.set_data(data, key=0, autofill=True) - return trans_list - elif data_type == mfstructure.DataType.list_multiple: - mult_list = mfdatalist.MFMultipleList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) - if data is not None: - mult_list.set_data(data, key=0, autofill=True) - return mult_list - - def _structure_init(self): - # load datasets keywords into dictionary - for dataset_struct in self.structure.data_structures.values(): - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - # load block header data items into dictionary - for dataset in self.structure.block_header_structure: - self._new_dataset(dataset.name, dataset, True, None) - - def set_model_relative_path(self, model_ws): - """Sets `model_ws` as the model path relative to the simulation's - path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. - """ - # update datasets - for key, dataset in self.datasets.items(): - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_name = os.path.split(file_line[0])[1] - file_line[0] = os.path.join(model_ws, old_file_name) - # update block headers - for block_header in self.block_headers: - for dataset in block_header.data_items: - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - "getting file data from " - '"{}"'.format(dataset.structure.name), - ) - - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_path, old_file_name = os.path.split(file_line[1]) - new_file_path = os.path.join(model_ws, old_file_name) - # update transient keys of datasets within the - # block - for key, idataset in self.datasets.items(): - if isinstance(idataset, mfdata.MFTransient): - idataset.update_transient_key( - file_line[1], new_file_path - ) - file_line[1] = os.path.join(model_ws, old_file_name) - - def add_dataset(self, dataset_struct, data, var_path): - """Add data to this block.""" - try: - self.datasets[var_path[-1]] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - var_path, - self._dimensions, - data, - self._container_package, - ) - except 
MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while adding dataset "{}" to block "{}"'.format( - dataset_struct.name, self.structure.name - ), - ) - - self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] - dtype = dataset_struct.get_datatype() - if ( - dtype == mfstructure.DataType.list_transient - or dtype == mfstructure.DataType.list_multiple - or dtype == mfstructure.DataType.array_transient - ): - # build repeating block header(s) - if isinstance(data, dict): - # Add block headers for each dictionary key - for index in data: - if isinstance(index, tuple): - header_list = list(index) - else: - header_list = [index] - self._build_repeating_header(header_list) - elif isinstance(data, list): - # Add a single block header of value 0 - self._build_repeating_header([0]) - elif dtype != mfstructure.DataType.list_multiple and data is not None: - self._build_repeating_header([[0]]) - - return self.datasets[var_path[-1]] - - def _build_repeating_header(self, header_data): - if self.header_exists(header_data[0]): - return - if ( - len(self.block_headers[-1].data_items) == 1 - and self.block_headers[-1].data_items[0].get_data() is not None - ): - block_header_path = self.path + (len(self.block_headers) + 1,) - block_header = MFBlockHeader( - self.structure.name, - [], - MFComment("", self.path, self._simulation_data, 0), - self._simulation_data, - block_header_path, - self, - ) - self.block_headers.append(block_header) - else: - block_header_path = self.path + (len(self.block_headers),) - - struct = self.structure - last_header = self.block_headers[-1] - try: - last_header.build_header_variables( - self._simulation_data, - struct.block_header_structure, - block_header_path, - header_data, - self._dimensions, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while building" - " block header variables for block " - '"{}"'.format(last_header.name), - ) - - def _new_dataset(self, key, dataset_struct, block_header=False, initial_val=None): - dataset_path = self.path + (key,) - if block_header: - if ( - dataset_struct.type == DatumType.integer - and initial_val is not None - and len(initial_val) >= 1 - and dataset_struct.get_record_size()[0] == 1 - ): - # stress periods are stored 0 based - initial_val = int(initial_val[0]) - 1 - if isinstance(initial_val, list): - initial_val_path = tuple(initial_val) - initial_val = [tuple(initial_val)] - else: - initial_val_path = initial_val - try: - new_data = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - self.block_headers[-1].add_data_item(new_data, initial_val_path) - - else: - try: - self.datasets[key] = self.data_factory( - self._simulation_data, - self._model_or_sim, - dataset_struct, - True, - dataset_path, - self._dimensions, - initial_val, - self._container_package, - ) - except MFDataException as mfde: - raise 
MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while adding" - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, self.structure.name), - ) - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - - def is_empty(self): - """Returns true if this block is empty.""" - for key, dataset in self.datasets.items(): - try: - has_data = dataset.has_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while verifying" - ' data of dataset "{}" in block ' - '"{}"'.format(dataset.structure.name, self.structure.name), - ) - - if has_data is not None and has_data: - return False - return True - - def load(self, block_header, fd, strict=True): - """Loads block from file object. file object must be advanced to - beginning of block before calling. - - Parameters - ---------- - block_header : MFBlockHeader - Block header for block block being loaded. - fd : file - File descriptor of file being loaded - strict : bool - Enforce strict MODFLOW 6 file format. - """ - # verify number of header variables - if ( - len(block_header.variable_strings) - < self.structure.number_non_optional_block_header_data() - ): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" does not ' - "contain the correct number of " - "variables {}".format(block_header.name, self.path) - ) - print(warning_str) - return - - if self.loaded: - # verify header has not already been loaded - for bh_current in self.block_headers: - if bh_current.is_same_header(block_header): - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block header for block "{}" is ' - "not a unique block header " - "{}".format(block_header.name, self.path) - ) - print(warning_str) - return - - # init - self.enabled = True - if not self.loaded: - self.block_headers = [] - block_header.block = self - self.block_headers.append(block_header) - - # process any header variable - if len(self.structure.block_header_structure) > 0: - dataset = self.structure.block_header_structure[0] - self._new_dataset( - dataset.name, - dataset, - True, - self.block_headers[-1].variable_strings, - ) - - # handle special readasarrays case - if self._container_package.structure.read_as_arrays or ( - hasattr(self._container_package, "aux") - and self._container_package.aux.structure.layered - ): - # auxiliary variables may appear with aux variable name as keyword - aux_vars = self._container_package.auxiliary.get_data() - if aux_vars is not None: - for var_name in list(aux_vars[0])[1:]: - self.datasets_keyword[(var_name,)] = ( - self._container_package.aux.structure - ) - - comments = [] - - # capture any initial comments - initial_comment = MFComment("", "", 0) - fd_block = fd - line = fd_block.readline() - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.split_data_line(line) - post_data_comments = MFComment("", "", self._simulation_data, 0) - while MFComment.is_comment(line, True): - initial_comment.add_text(line) - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - - # if block not empty - external_file_info = None - if not (len(arr_line[0]) > 2 and 
arr_line[0][:3].upper() == "END"): - if arr_line[0].lower() == "open/close": - # open block contents from external file - fd_block.readline() - root_path = self._simulation_data.mfpath.get_sim_path() - try: - file_name = os.path.split(arr_line[1])[-1] - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f' opening external file "{file_name}"...') - external_file_info = arr_line - except: - type_, value_, traceback_ = sys.exc_info() - message = f'Error reading external file specified in line "{line}"' - raise MFDataException( - self._container_package.model_name, - self._container_package._get_pname(), - self.path, - "reading external file", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ) - if len(self.structure.data_structures) <= 1: - # load a single data set - dataset = self.datasets[next(iter(self.datasets))] - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading data {dataset.structure.name}...") - next_line = dataset.load( - line, - fd_block, - self.block_headers[-1], - initial_comment, - external_file_info, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format( - dataset.structure.name, - self.structure.name, - fd_block.name, - ), - ) - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading child package {package_info[0]}...") - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - - if next_line[1] is not None: - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - else: - arr_line = "" - # capture any trailing comments - dataset.post_data_comments = post_data_comments - while arr_line and ( - len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" - ): - next_line[1] = fd_block.readline().strip() - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - if arr_line and ( - len(next_line[1]) <= 2 or arr_line[0][:3].upper() != "END" - ): - post_data_comments.add_text(" ".join(arr_line)) - else: - # look for keyword and store line as data or comment - try: - key, results = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - except MFInvalidTransientBlockHeaderException as e: - warning_str = f"WARNING: {e}" - print(warning_str) - self.block_headers.pop() - return - - self._save_comments(arr_line, line, key, comments) - if results[1] is None or results[1][:3].upper() != "END": - # block consists of unordered datasets - # load the data sets out of order based on - # initial constants - line = " " - while line != "": - line = fd_block.readline() - arr_line = 
datautil.PyListUtil.split_data_line(line) - if arr_line: - # determine if at end of block - if ( - len(arr_line[0]) > 2 - and arr_line[0][:3].upper() == "END" - ): - break - # look for keyword and store line as data o - # r comment - key, result = self._find_data_by_keyword( - line, fd_block, initial_comment - ) - self._save_comments(arr_line, line, key, comments) - if result[1] is not None and result[1][:3].upper() == "END": - break - else: - # block empty, store empty array in block variables - empty_arr = [] - for ds in self.datasets.values(): - if isinstance(ds, mfdata.MFTransient): - transient_key = block_header.get_transient_key() - ds.set_data(empty_arr, key=transient_key) - self.loaded = True - self.is_valid() - - def _find_data_by_keyword(self, line, fd, initial_comment): - first_key = None - nothing_found = False - next_line = [True, line] - while next_line[0] and not nothing_found: - arr_line = datautil.PyListUtil.split_data_line(next_line[1]) - key = datautil.find_keyword(arr_line, self.datasets_keyword) - if key is not None: - ds_name = self.datasets_keyword[key].name - try: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading data {ds_name}...") - next_line = self.datasets[ds_name].load( - next_line[1], - fd, - self.block_headers[-1], - initial_comment, - ) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'loading data "{}" in ' - 'block "{}" from file "{}"' - ".".format(ds_name, self.structure.name, fd.name), - ) - - # see if first item's name indicates a reference to - # another package - package_info_list = self._get_package_info(self.datasets[ds_name]) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading child package {package_info[1]}...") - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - package_info[1], - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - if first_key is None: - first_key = key - nothing_found = False - elif ( - arr_line[0].lower() == "readasarrays" - and self.path[-1].lower() == "options" - and self._container_package.structure.read_as_arrays is False - ): - error_msg = ( - "ERROR: Attempting to read a ReadAsArrays " - "package as a non-ReadAsArrays " - "package {}".format(self.path) - ) - raise ReadAsArraysException(error_msg) - else: - nothing_found = True - - if first_key is None: - # look for recarrays. 
if there is a lone recarray in this block, - # use it by default - recarrays = self.structure.get_all_recarrays() - if len(recarrays) != 1: - return key, [None, None] - dataset = self.datasets[recarrays[0].name] - ds_result = dataset.load(line, fd, self.block_headers[-1], initial_comment) - - # see if first item's name indicates a reference to another - # package - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading child package {package_info[0]}...") - fname = package_info[1] - if package_info[2] is not None: - fname = os.path.join(package_info[2], fname) - filemgr = self._simulation_data.mfpath - fname = filemgr.strip_model_relative_path( - self._model_or_sim.name, fname - ) - pkg = self._model_or_sim.load_package( - package_info[0], - fname, - None, - True, - "", - package_info[3], - self._container_package, - ) - if hasattr(self._container_package, package_info[0]): - package_group = getattr( - self._container_package, package_info[0] - ) - package_group._append_package(pkg, pkg.filename, False) - - return recarrays[0].keyword, ds_result - else: - return first_key, next_line - - def _get_package_info(self, dataset): - if not dataset.structure.file_data: - return None - for index in range(0, len(dataset.structure.data_item_structures)): - data_item = dataset.structure.data_item_structures[index] - if ( - data_item.type == DatumType.keyword - or data_item.type == DatumType.string - ): - item_name = data_item.name - package_type = item_name[:-1] - model_type = self._model_or_sim.structure.model_type - # not all packages have the same naming convention - # try different naming conventions to find the appropriate - # package - package_types = [ - package_type, - f"{self._container_package.package_type}{package_type}", - ] - package_type_found = None - for ptype in package_types: - if PackageContainer.package_factory(ptype, model_type) is not None: - package_type_found = ptype - break - if package_type_found is not None: - try: - data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message="Error occurred while " - 'getting data from "{}" ' - 'in block "{}".'.format( - dataset.structure.name, self.structure.name - ), - ) - package_info_list = [] - if isinstance(data, np.recarray): - for row in data: - self._add_to_info_list( - package_info_list, - row[index], - package_type_found, - ) - else: - self._add_to_info_list( - package_info_list, data, package_type_found - ) - - return package_info_list - return None - - def _add_to_info_list(self, package_info_list, file_location, package_type_found): - file_path, file_name = os.path.split(file_location) - dict_package_name = f"{package_type_found}_{self.path[-2]}" - package_info_list.append( - ( - package_type_found, - file_name, - file_path, - dict_package_name, - ) - ) - - def _save_comments(self, arr_line, line, key, comments): - # FIX: Save these comments somewhere in the data set - if key not in self.datasets_keyword: - if MFComment.is_comment(key, True): - if comments: - comments.append("\n") - comments.append(arr_line) - - def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes block to a file object. - - Parameters - ---------- - fd : file object - File object to write to. 
- - """ - # never write an empty block - is_empty = self.is_empty() - if ( - is_empty - and self.structure.name.lower() != "exchanges" - and self.structure.name.lower() != "options" - and self.structure.name.lower() != "sources" - and self.structure.name.lower() != "stressperioddata" - ): - return - if self.structure.repeating(): - repeating_datasets = self._find_repeating_datasets() - for repeating_dataset in repeating_datasets: - # resolve any missing block headers - self._add_missing_block_headers(repeating_dataset) - for block_header in sorted(self.block_headers): - # write block - self._write_block(fd, block_header, ext_file_action) - else: - self._write_block(fd, self.block_headers[0], ext_file_action) - - def _add_missing_block_headers(self, repeating_dataset): - key_data_list = repeating_dataset.get_active_key_list() - # assemble a dictionary of data keys and empty keys - key_dict = {} - for key in key_data_list: - key_dict[key[0]] = True - for key, value in repeating_dataset.empty_keys.items(): - if value: - key_dict[key] = True - for key in key_dict.keys(): - has_data = repeating_dataset.has_data(key) - empty_key = ( - key in repeating_dataset.empty_keys - and repeating_dataset.empty_keys[key] - ) - if not self.header_exists(key) and (has_data or empty_key): - self._build_repeating_header([key]) - - def header_exists(self, key, data_path=None): - if not isinstance(key, list): - if key is None: - return - comp_key_list = [key] - else: - comp_key_list = key - for block_header in self.block_headers: - transient_key = block_header.get_transient_key(data_path) - if transient_key is True: - return - for comp_key in comp_key_list: - if transient_key is not None and transient_key == comp_key: - return True - return False - - def set_all_data_external( - self, - base_name, - check_data=True, - external_data_folder=None, - binary=False, - ): - """Sets the block's list and array data to be stored externally, - base_name is external file name's prefix, check_data determines - if data error checking is enabled during this process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - base_name : str - Base file name of external files where data will be written to. - check_data : bool - Whether to do data error checking. 
- external_data_folder - Folder where external data will be stored - binary: bool - Whether file will be stored as binary - - """ - - for key, dataset in self.datasets.items(): - lst_data = isinstance(dataset, mfdatalist.MFList) or isinstance( - dataset, mfdataplist.MFPandasList - ) - if ( - isinstance(dataset, mfdataarray.MFArray) - or (lst_data and dataset.structure.type == DatumType.recarray) - and dataset.enabled - ): - if not binary or ( - lst_data - and ( - dataset.data_dimensions.package_dim.boundnames() - or not dataset.structure.basic_item - ) - ): - ext = "txt" - binary = False - else: - ext = "bin" - file_path = f"{base_name}_{dataset.structure.name}.{ext}" - replace_existing_external = False - if external_data_folder is not None: - # get simulation root path - root_path = self._simulation_data.mfpath.get_sim_path() - # get model relative path, if it exists - if isinstance(self._model_or_sim, ModelInterface): - name = self._model_or_sim.name - rel_path = self._simulation_data.mfpath.model_relative_path[ - name - ] - if rel_path is not None: - root_path = os.path.join(root_path, rel_path) - full_path = os.path.join(root_path, external_data_folder) - if not os.path.exists(full_path): - # create new external data folder - os.makedirs(full_path) - file_path = os.path.join(external_data_folder, file_path) - replace_existing_external = True - dataset.store_as_external_file( - file_path, - replace_existing_external=replace_existing_external, - check_data=check_data, - binary=binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the block's list and array data to be stored internally, - check_data determines if data error checking is enabled during this - process. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - - Parameters - ---------- - check_data : bool - Whether to do data error checking. 
- - """ - - for key, dataset in self.datasets.items(): - if ( - isinstance(dataset, mfdataarray.MFArray) - or ( - ( - isinstance(dataset, mfdatalist.MFList) - or isinstance(dataset, mfdataplist.MFPandasList) - ) - and dataset.structure.type == DatumType.recarray - ) - and dataset.enabled - ): - dataset.store_internal(check_data=check_data) - - def _find_repeating_datasets(self): - repeating_datasets = [] - for key, dataset in self.datasets.items(): - if dataset.repeating: - repeating_datasets.append(dataset) - return repeating_datasets - - def _prepare_external(self, fd, file_name, binary=False): - fd_main = fd - fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) - # resolve full file and folder path - fd_file_path = os.path.join(fd_path, file_name) - fd_folder_path = os.path.split(fd_file_path)[0] - if fd_folder_path != "": - if not os.path.exists(fd_folder_path): - # create new external data folder - os.makedirs(fd_folder_path) - return fd_main, fd_file_path - - def _write_block(self, fd, block_header, ext_file_action): - transient_key = None - basic_list = False - dataset_one = list(self.datasets.values())[0] - if isinstance( - dataset_one, - (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), - ): - basic_list = True - for dataset in self.datasets.values(): - assert isinstance( - dataset, - ( - mfdataplist.MFPandasList, - mfdataplist.MFPandasTransientList, - ), - ) - # write block header - block_header.write_header(fd) - if len(block_header.data_items) > 0: - transient_key = block_header.get_transient_key() - - # gather data sets to write - data_set_output = [] - data_found = False - for key, dataset in self.datasets.items(): - try: - if transient_key is None: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" writing data {dataset.structure.name}...") - if basic_list: - ext_fname = dataset.external_file_name() - if ext_fname is not None: - binary = dataset.binary_ext_data() - # write block contents to external file - fd_main, fd = self._prepare_external(fd, ext_fname, binary) - dataset.write_file_entry(fd, fd_main=fd_main) - fd = fd_main - else: - dataset.write_file_entry(fd) - else: - data_set_output.append( - dataset.get_file_entry(ext_file_action=ext_file_action) - ) - data_found = True - else: - if ( - self._simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print( - " writing data {} ({})...".format( - dataset.structure.name, transient_key - ) - ) - if basic_list: - ext_fname = dataset.external_file_name(transient_key) - if ext_fname is not None: - binary = dataset.binary_ext_data(transient_key) - # write block contents to external file - fd_main, fd = self._prepare_external(fd, ext_fname, binary) - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - fd_main=fd_main, - ) - fd = fd_main - else: - dataset.write_file_entry( - fd, - transient_key, - ext_file_action=ext_file_action, - ) - else: - if dataset.repeating: - output = dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action - ) - if output is not None: - data_set_output.append(output) - data_found = True - else: - data_set_output.append( - dataset.get_file_entry(ext_file_action=ext_file_action) - ) - data_found = True - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message=( - "Error occurred while writing data " - f'"{dataset.structure.name}" 
in block ' - f'"{self.structure.name}" to file "{fd.name}"' - ), - ) - if not data_found: - return - if not basic_list: - # write block header - block_header.write_header(fd) - - if self.external_file_name is not None: - indent_string = self._simulation_data.indent_string - fd.write(f'{indent_string}open/close "{self.external_file_name}"\n') - # write block contents to external file - fd_main, fd = self._prepare_external(fd, self.external_file_name) - # write data sets - for output in data_set_output: - fd.write(output) - - # write trailing comments - pth = block_header.blk_trailing_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - if self.external_file_name is not None and not basic_list: - # switch back writing to package file - fd.close() - fd = fd_main - - # write block footer - block_header.write_footer(fd) - - # write post block comments - pth = block_header.blk_post_comment_path - if pth in self._simulation_data.mfdata: - self._simulation_data.mfdata[pth].write(fd) - - # write extra line if comments are off - if not self._simulation_data.comments_on: - fd.write("\n") - - def is_allowed(self): - """Determine if block is valid based on the values of dependent - MODFLOW variables.""" - if self.structure.variable_dependant_path: - # fill in empty part of the path with the current path - if len(self.structure.variable_dependant_path) == 3: - dependant_var_path = ( - self.path[0], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 2: - dependant_var_path = ( - self.path[0], - self.path[1], - ) + self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 1: - dependant_var_path = ( - self.path[0], - self.path[1], - self.path[2], - ) + self.structure.variable_dependant_path - else: - dependant_var_path = None - - # get dependency - dependant_var = None - mf_data = self._simulation_data.mfdata - if dependant_var_path in mf_data: - dependant_var = mf_data[dependant_var_path] - - # resolve dependency - if self.structure.variable_value_when_active[0] == "Exists": - exists = self.structure.variable_value_when_active[1] - if dependant_var and exists.lower() == "true": - return True - elif not dependant_var and exists.lower() == "false": - return True - else: - return False - elif not dependant_var: - return False - elif self.structure.variable_value_when_active[0] == ">": - min_val = self.structure.variable_value_when_active[1] - if dependant_var > float(min_val): - return True - else: - return False - elif self.structure.variable_value_when_active[0] == "<": - max_val = self.structure.variable_value_when_active[1] - if dependant_var < float(max_val): - return True - else: - return False - return True - - def is_valid(self): - """ - Returns true if the block is valid. 
- """ - # check data sets - for dataset in self.datasets.values(): - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid: - return False - # check variables - for block_header in self.block_headers: - for dataset in block_header.data_items: - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid(): - return False - - -class MFPackage(PackageInterface): - """ - Provides an interface for the user to specify data to build a package. - - Parameters - ---------- - parent : MFModel, MFSimulation, or MFPackage - The parent model, simulation, or package containing this package - package_type : str - String defining the package type - filename : str or PathLike - Name or path of file where this package is stored - quoted_filename : str - Filename with quotes around it when there is a space in the name - pname : str - Package name - loading_package : bool - Whether or not to add this package to the parent container's package - list during initialization - - Attributes - ---------- - blocks : dict - Dictionary of blocks contained in this package by block name - path : tuple - Data dictionary path to this package - structure : PackageStructure - Describes the blocks and data contain in this package - dimensions : PackageDimension - Resolves data dimensions for data within this package - - """ - - def __init__( - self, - parent, - package_type, - filename=None, - pname=None, - loading_package=False, - **kwargs, - ): - parent_file = kwargs.pop("parent_file", None) - if isinstance(parent, MFPackage): - self.model_or_sim = parent.model_or_sim - self.parent_file = parent - elif parent_file is not None: - self.model_or_sim = parent - self.parent_file = parent_file - else: - self.model_or_sim = parent - self.parent_file = None - _internal_package = kwargs.pop("_internal_package", False) - if _internal_package: - self.internal_package = True - else: - self.internal_package = False - self._data_list = [] - self._package_type = package_type - if self.model_or_sim.type == "Model" and package_type.lower() != "nam": - self.model_name = self.model_or_sim.name - else: - self.model_name = None - - # a package must have a dfn_file_name - if not hasattr(self, "dfn_file_name"): - self.dfn_file_name = "" - - if self.model_or_sim.type != "Model" and self.model_or_sim.type != "Simulation": - message = ( - "Invalid model_or_sim parameter. Expecting either a " - 'model or a simulation. Instead type "{}" was ' - "given.".format(type(self.model_or_sim)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self._package_container = PackageContainer(self.model_or_sim.simulation_data) - self.simulation_data = self.model_or_sim.simulation_data - - self.blocks = {} - self.container_type = [] - self.loading_package = loading_package - if pname is not None: - if not isinstance(pname, str): - message = ( - "Invalid pname parameter. Expecting type str. 
" - 'Instead type "{}" was ' - "given.".format(type(pname)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - - self.package_name = pname.lower() - else: - self.package_name = None - - if filename is None: - if self.model_or_sim.type == "Simulation": - # filename uses simulation base name - base_name = os.path.basename(os.path.normpath(self.model_or_sim.name)) - self._filename = f"{base_name}.{package_type}" - else: - # filename uses model base name - self._filename = f"{self.model_or_sim.name}.{package_type}" - else: - if not isinstance(filename, (str, os.PathLike)): - message = ( - "Invalid fname parameter. Expecting type str. " - 'Instead type "{}" was ' - "given.".format(type(filename)) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - pname, - "", - "initializing package", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.model_or_sim.simulation_data.debug, - ) - self._filename = datautil.clean_filename(str(filename).replace("\\", "/")) - self.path, self.structure = self.model_or_sim.register_package( - self, not loading_package, pname is None, filename is None - ) - self.dimensions = self.create_package_dimensions() - - if self.path is None: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "WARNING: Package type {} failed to register property. {}".format( - self._package_type, self.path - ) - ) - if self.parent_file is not None: - self.container_type.append(PackageContainerType.package) - # init variables that may be used later - self.post_block_comments = None - self.last_error = None - self.bc_color = "black" - self.__inattr = False - self._child_package_groups = {} - child_builder_call = kwargs.pop("child_builder_call", None) - if ( - self.parent_file is not None - and child_builder_call is None - and package_type in self.parent_file._child_package_groups - ): - # initialize as part of the parent's child package group - chld_pkg_grp = self.parent_file._child_package_groups[package_type] - chld_pkg_grp.init_package(self, self._filename, False) - - # remove any remaining valid kwargs - key_list = list(kwargs.keys()) - for key in key_list: - if "filerecord" in key and hasattr(self, f"{key}"): - kwargs.pop(f"{key}") - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ", ".join(kwargs.keys()) - excpt_str = f'Extraneous kwargs "{kwargs_str}" provided to MFPackage.' 
- raise FlopyException(excpt_str) - - def __init_subclass__(cls): - """Register package type""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, - model=self.model_name, - package=self._get_pname(), - ) - return - - if all(hasattr(self, attr) for attr in ["model_or_sim", "_package_type"]): - if hasattr(self.model_or_sim, "_mg_resync"): - if not self.model_or_sim._mg_resync: - self.model_or_sim._mg_resync = self._mg_resync - - super().__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - @property - def filename(self): - """Package's file name.""" - return self._filename - - @property - def quoted_filename(self): - """Package's file name with quotes if there is a space.""" - if " " in self._filename: - return f'"{self._filename}"' - return self._filename - - @filename.setter - def filename(self, fname): - """Package's file name.""" - if ( - isinstance(self.parent_file, MFPackage) - and self.package_type in self.parent_file._child_package_groups - ): - fname = datautil.clean_filename(fname) - try: - child_pkg_group = self.parent_file._child_package_groups[ - self.structure.file_type - ] - child_pkg_group._update_filename(self._filename, fname) - except Exception: - print( - "WARNING: Unable to update file name for parent" - f"package of {self.package_name}." - ) - if self.model_or_sim is not None and fname is not None: - if self._package_type != "nam": - self.model_or_sim.update_package_filename(self, fname) - self._filename = fname - - @property - def package_type(self): - """String describing type of package""" - return self._package_type - - @property - def name(self): - """Name of package""" - return [self.package_name] - - @name.setter - def name(self, name): - """Name of package""" - self.package_name = name - - @property - def parent(self): - """Parent package""" - return self.model_or_sim - - @parent.setter - def parent(self, parent): - """Parent package""" - assert False, "Do not use this setter to set the parent" - - @property - def plottable(self): - """If package is plottable""" - if self.model_or_sim.type == "Simulation": - return False - else: - return True - - @property - def output(self): - """ - Method to get output associated with a specific package - - Returns - ------- - MF6Output object - """ - return MF6Output(self) - - @property - def data_list(self): - """List of data in this package.""" - # return [data_object, data_object, ...] - return self._data_list - - @property - def package_key_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_names(self): - """Returns a list of package names. - - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. 
- """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_names - - @property - def package_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_dict - - @property - def package_type_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_type_dict - - @property - def package_name_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_name_dict - - @property - def package_filename_dict(self): - """ - .. deprecated:: 3.9 - This method is for internal use only and will be deprecated. - """ - warnings.warn( - "This method is for internal use only and will be deprecated.", - category=DeprecationWarning, - ) - return self._package_container.package_filename_dict - - def netcdf_attrs(self, mesh=None): - attrs = {} - - def attr_d(tagname, iaux=None, layer=None): - tag = tagname - name = f"{self.package_name}" - if iaux: - auxvar = self.dimensions.get_aux_variables()[0] - tag = f"{tag}/{iaux}" - name = f"{name}_{auxvar[iaux]}" - else: - name = f"{name}_{tagname}" - if layer: - tag = f"{tag}/layer{layer}" - name = f"{name}_l{layer}" - - a = {} - a["varname"] = name - a["attrs"] = {} - a["attrs"]["modflow_input"] = ( - f"{self.model_name}/{self.package_name}/{tagname}" - ).upper() - if iaux: - a["attrs"]["modflow_iaux"] = iaux - if layer: - a["attrs"]["layer"] = layer - return tag, a - - for key, block in self.blocks.items(): - if key != "griddata" and key != "period": - continue - for dataset in block.datasets.values(): - if isinstance(dataset, mfdataarray.MFArray): - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if not (dataset.structure.netcdf and dataset.has_data()): - continue - if dataset.structure.layered and mesh == "LAYERED": - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate( - self.dimensions.get_aux_variables()[0] - ): - if auxname == "auxiliary" and n == 0: - continue - for l in range(self.model_or_sim.modelgrid.nlay): - key, a = attr_d(data_item.name, n, l + 1) - attrs[key] = a - else: - for l in range(self.model_or_sim.modelgrid.nlay): - key, a = attr_d(data_item.name, layer=l + 1) - attrs[key] = a - else: - if data_item.name == "aux" or data_item.name == "auxvar": - for n, auxname in enumerate( - self.dimensions.get_aux_variables()[0] - ): - if auxname == "auxiliary" and n == 0: - continue - key, a = attr_d(data_item.name, iaux=n) - attrs[key] = a - else: - key, a = attr_d(data_item.name) - attrs[key] = a - return attrs - - def get_package(self, name=None, type_only=False, name_only=False): - """ - Finds a package by package name, package key, package type, or partial - package name. returns either a single package, a list of packages, - or None. - - Parameters - ---------- - name : str - Name or type of the package, 'my-riv-1, 'RIV', 'LPF', etc. 
- type_only : bool - Search for package by type only - name_only : bool - Search for package by name only - - Returns - ------- - pp : Package object - - """ - return self._package_container.get_package(name, type_only, name_only) - - def add_package(self, package): - pkg_type = package.package_type.lower() - if pkg_type in self._package_container.package_type_dict: - for existing_pkg in self._package_container.package_type_dict[pkg_type]: - if existing_pkg is package: - # do not add the same package twice - return - self._package_container.add_package(package) - - def _get_aux_data(self, aux_names): - if hasattr(self, "stress_period_data"): - spd = self.stress_period_data.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "packagedata"): - pd = self.packagedata.get_data() - if aux_names[0][1] in pd.dtype.names: - return pd - if hasattr(self, "perioddata"): - spd = self.perioddata.get_data() - if ( - 0 in spd - and spd[0] is not None - and aux_names[0][1] in spd[0].dtype.names - ): - return spd - if hasattr(self, "aux"): - return self.aux.get_data() - return None - - def _boundnames_active(self): - if hasattr(self, "boundnames"): - if self.boundnames.get_data(): - return True - return False - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Data check, returns True on success. - - Warning - ------- - The MF6 check mechanism is deprecated pending reimplementation - in a future release. While the checks API will remain in place - through 3.x, it may be unstable, and will likely change in 4.x. - """ - - if checktype is None: - checktype = mf6check - # do general checks - chk = super().check(f, verbose, level, checktype) - - # do mf6 specific checks - if hasattr(self, "auxiliary"): - # auxiliary variable check - # check if auxiliary variables are defined - aux_names = self.auxiliary.get_data() - if aux_names is not None and len(aux_names[0]) > 1: - num_aux_names = len(aux_names[0]) - 1 - # check for stress period data - aux_data = self._get_aux_data(aux_names) - if aux_data is not None and len(aux_data) > 0: - # make sure the check object exists - if chk is None: - chk = self._get_check(f, verbose, level, checktype) - if isinstance(aux_data, dict): - aux_datasets = list(aux_data.values()) - else: - aux_datasets = [aux_data] - dataset_type = "unknown" - for dataset in aux_datasets: - if isinstance(dataset, np.recarray): - dataset_type = "recarray" - break - elif isinstance(dataset, np.ndarray): - dataset_type = "ndarray" - break - # if aux data is in a list - if dataset_type == "recarray": - # check for time series data - time_series_name_dict = {} - if hasattr(self, "ts") and hasattr( - self.ts, "time_series_namerecord" - ): - # build dictionary of time series data variables - ts_nr = self.ts.time_series_namerecord.get_data() - if ts_nr is not None: - for item in ts_nr: - if len(item) > 0 and item[0] is not None: - time_series_name_dict[item[0]] = True - # auxiliary variables are last unless boundnames - # defined, then second to last - if self._boundnames_active(): - offset = 1 - else: - offset = 0 - - # loop through stress period datasets with aux data - for data in aux_datasets: - if isinstance(data, np.recarray): - for row in data: - row_size = len(row) - aux_start_loc = ( - row_size - num_aux_names - offset - 1 - ) - # loop through auxiliary variables - for idx, var in enumerate(list(aux_names[0])[1:]): - # get index of current aux variable - data_index = aux_start_loc + idx - # 
verify auxiliary value is either - # numeric or time series variable - if ( - not datautil.DatumUtil.is_float( - row[data_index] - ) - and row[data_index] - not in time_series_name_dict - ): - desc = ( - f"Invalid non-numeric " - f"value " - f"'{row[data_index]}' " - f"in auxiliary data." - ) - chk._add_to_summary( - "Error", - desc=desc, - package=self.package_name, - ) - # else if stress period data is arrays - elif dataset_type == "ndarray": - # loop through auxiliary stress period datasets - for data in aux_datasets: - # verify auxiliary value is either numeric or time - # array series variable - if isinstance(data, np.ndarray): - val = np.isnan(np.sum(data)) - if val: - desc = ( - "One or more nan values were " - "found in auxiliary data." - ) - chk._add_to_summary( - "Warning", - desc=desc, - package=self.package_name, - ) - return chk - - def _get_nan_exclusion_list(self): - excl_list = [] - if hasattr(self, "stress_period_data"): - spd_struct = self.stress_period_data.structure - for item_struct in spd_struct.data_item_structures: - if item_struct.optional or item_struct.keystring_dict: - excl_list.append(item_struct.name) - return excl_list - - def _get_data_str(self, formal, show_data=True): - data_str = ( - "package_name = {}\nfilename = {}\npackage_type = {}" - "\nmodel_or_simulation_package = {}" - "\n{}_name = {}" - "\n".format( - self._get_pname(), - self._filename, - self.package_type, - self.model_or_sim.type.lower(), - self.model_or_sim.type.lower(), - self.model_or_sim.name, - ) - ) - if self.parent_file is not None and formal: - data_str = f"{data_str}parent_file = {self.parent_file._get_pname()}\n\n" - else: - data_str = f"{data_str}\n" - if show_data: - for block in self.blocks.values(): - if formal: - bl_repr = repr(block) - if len(bl_repr.strip()) > 0: - data_str = "{}Block {}\n--------------------\n{}\n".format( - data_str, block.structure.name, repr(block) - ) - else: - bl_str = str(block) - if len(bl_str.strip()) > 0: - data_str = "{}Block {}\n--------------------\n{}\n".format( - data_str, block.structure.name, str(block) - ) - return data_str - - def _get_pname(self): - if self.package_name is not None: - return str(self.package_name) - else: - return str(self._filename) - - def _get_block_header_info(self, line, path): - # init - header_variable_strs = [] - arr_clean_line = line.strip().split() - header_comment = MFComment( - "", path + (arr_clean_line[1],), self.simulation_data, 0 - ) - # break header into components - if len(arr_clean_line) < 2: - message = ( - "Block header does not contain a name. 
Name " - 'expected in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "parsing block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - elif len(arr_clean_line) == 2: - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - else: - # process text after block name - comment = False - for entry in arr_clean_line[2:]: - # if start of comment - if MFComment.is_comment(entry.strip()[0]): - comment = True - if comment: - header_comment.text = " ".join([header_comment.text, entry]) - else: - header_variable_strs.append(entry) - return MFBlockHeader( - arr_clean_line[1], - header_variable_strs, - header_comment, - self.simulation_data, - path, - ) - - def _update_size_defs(self): - # build temporary data lookup by name - data_lookup = {} - for block in self.blocks.values(): - for dataset in block.datasets.values(): - data_lookup[dataset.structure.name] = dataset - - # loop through all data - for block in self.blocks.values(): - for dataset in block.datasets.values(): - # if data shape is 1-D - if dataset.structure.shape and len(dataset.structure.shape) == 1: - # if shape name is data in this package - if dataset.structure.shape[0] in data_lookup: - size_def = data_lookup[dataset.structure.shape[0]] - size_def_name = size_def.structure.name - - if isinstance(dataset, mfdata.MFTransient): - # for transient data always use the maximum size - new_size = -1 - for key in dataset.get_active_key_list(): - try: - data = dataset.get_data(key=key[0]) - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - data_len = len(data) - if data_len > new_size: - new_size = data_len - else: - # for all other data set max to size - new_size = -1 - try: - data = dataset.get_data() - except (OSError, MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - new_size = len(dataset.get_data()) - - if size_def.get_data() is None: - current_size = -1 - else: - current_size = size_def.get_data() - - if new_size > current_size: - # store current size - size_def.set_data(new_size) - - # informational message to the user - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - print( - "INFORMATION: {} in {} changed to {} " - "based on size of {}".format( - size_def_name, - size_def.structure.path[:-1], - new_size, - dataset.structure.name, - ) - ) - - def inspect_cells(self, cell_list, stress_period=None): - """ - Inspect model cells. Returns package data associated with cells. - - Parameters - ---------- - cell_list : list of tuples - List of model cells. Each model cell is a tuple of integers. - ex: [(1,1,1), (2,4,3)] - stress_period : int - For transient data, only return data from this stress period. If - not specified or None, all stress period data will be returned. 
- - Returns - ------- - output : array - Array containing inspection results - - """ - data_found = [] - - # loop through blocks - local_index_names = [] - local_index_blocks = [] - local_index_values = [] - local_index_cellids = [] - # loop through blocks in package - for block in self.blocks.values(): - # loop through data in block - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - # handle list data - cellid_column = None - local_index_name = None - # loop through list data column definitions - for index, data_item in enumerate( - dataset.structure.data_item_structures - ): - if index == 0 and data_item.type == DatumType.integer: - local_index_name = data_item.name - # look for cellid column in list data row - if isinstance(data_item, MFDataItemStructure) and ( - data_item.is_cellid or data_item.possible_cellid - ): - cellid_column = index - break - if cellid_column is not None: - data_output = DataSearchOutput(dataset.path) - local_index_vals = [] - local_index_cells = [] - # get data - if isinstance(dataset, mfdatalist.MFTransientList): - # data may be in multiple transient blocks, get - # data from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get data - main_data = {-1: dataset.get_data()} - - # loop through each dataset - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = value.dtype.names - # loop through list data rows - for line in value: - # loop through list of cells we are searching - # for - for cell in cell_list: - if isinstance( - line[cellid_column], tuple - ) and cellids_equal(line[cellid_column], cell): - # save data found - data_output.data_entries.append(line) - data_output.data_entry_ids.append(cell) - data_output.data_entry_stress_period.append(key) - if datautil.DatumUtil.is_int(line[0]): - # save index data for further - # processing. 
assuming index is - # always first entry - local_index_vals.append(line[0]) - local_index_cells.append(cell) - - if local_index_name is not None and len(local_index_vals) > 0: - # capture index lookups for scanning related data - local_index_names.append(local_index_name) - local_index_blocks.append(block.path[-1]) - local_index_values.append(local_index_vals) - local_index_cellids.append(local_index_cells) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - elif isinstance(dataset, mfdataarray.MFArray): - # handle array data - data_shape = copy.deepcopy( - dataset.structure.data_item_structures[0].shape - ) - if dataset.path[-1] == "top": - # top is a special case where the two datasets - # need to be combined to get the correct layer top - model_grid = self.model_or_sim.modelgrid - main_data = {-1: model_grid.top_botm} - data_shape.append("nlay") - else: - if isinstance(dataset, mfdataarray.MFTransientArray): - # data may be in multiple blocks, get data from - # appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block, get a process data - main_data = {-1: dataset.get_data()} - if main_data is None: - continue - data_output = DataSearchOutput(dataset.path) - # loop through datasets - for key, array_data in main_data.items(): - if array_data is None: - continue - self.model_or_sim.match_array_cells( - cell_list, data_shape, array_data, key, data_output - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - - if len(local_index_names) > 0: - # look for data that shares the index value with data found - # for example a shared well or reach number - for block in self.blocks.values(): - # loop through data - for dataset in block.datasets.values(): - if isinstance(dataset, mfdatalist.MFList): - data_item = dataset.structure.data_item_structures[0] - data_output = DataSearchOutput(dataset.path) - # loop through previous data found - for ( - local_index_name, - local_index_vals, - cell_ids, - local_block_name, - ) in zip( - local_index_names, - local_index_values, - local_index_cellids, - local_index_blocks, - ): - if local_block_name == block.path[-1]: - continue - if ( - isinstance(data_item, MFDataItemStructure) - and data_item.name == local_index_name - and data_item.type == DatumType.integer - ): - # matching data index type found, get data - if isinstance(dataset, mfdatalist.MFTransientList): - # data may be in multiple blocks, get data - # from appropriate blocks - main_data = dataset.get_data(stress_period) - if stress_period is not None: - main_data = {stress_period: main_data} - else: - # data is all in one block - main_data = {-1: dataset.get_data()} - # loop through the data - for key, value in main_data.items(): - if value is None: - continue - if data_output.data_header is None: - data_output.data_header = value.dtype.names - # loop through each row of data - for line in value: - # loop through the index values we are - # looking for - for index_val, cell_id in zip( - local_index_vals, cell_ids - ): - # try to match index values we are - # looking for to the data - if index_val == line[0]: - # save data found - data_output.data_entries.append(line) - data_output.data_entry_ids.append( - index_val - ) - data_output.data_entry_cellids.append( - cell_id - ) - data_output.data_entry_stress_period.append( - key - ) - if len(data_output.data_entries) > 0: - data_found.append(data_output) - return data_found - - def 
remove(self): - """Removes this package from the simulation/model it is currently a - part of. - """ - self.model_or_sim.remove_package(self) - - def build_child_packages_container(self, pkg_type, filerecord): - """Builds a container object for any child packages. This method is - only intended for FloPy internal use.""" - # get package class - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - # create child package object - child_pkgs_name = f"utl{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") - if child_pkgs_obj is None and self.model_or_sim.model_type is None: - # simulation level object, try just the package type in the name - child_pkgs_name = f"{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") - if child_pkgs_obj is None: - # see if the package is part of one of the supported model types - for model_type in MFStructure().sim_struct.model_types: - child_pkgs_name = f"{model_type}{pkg_type}packages" - child_pkgs_obj = PackageContainer.package_factory(child_pkgs_name, "") - if child_pkgs_obj is not None: - break - child_pkgs = child_pkgs_obj( - self.model_or_sim, self, pkg_type, filerecord, None, package_obj - ) - setattr(self, pkg_type, child_pkgs) - self._child_package_groups[pkg_type] = child_pkgs - - def _get_dfn_name_dict(self): - dfn_name_dict = {} - item_num = 0 - for item in self.structure.dfn_list: - if len(item) > 1: - item_name = item[1].split() - if len(item_name) > 1 and item_name[0] == "name": - dfn_name_dict[item_name[1]] = item_num - item_num += 1 - return dfn_name_dict - - def build_child_package(self, pkg_type, data, parameter_name, filerecord): - """Builds a child package. This method is only intended for FloPy - internal use.""" - if not hasattr(self, pkg_type): - self.build_child_packages_container(pkg_type, filerecord) - if data is not None: - package_group = getattr(self, pkg_type) - # build child package file name - child_path = package_group.next_default_file_path() - # create new empty child package - package_obj = PackageContainer.package_factory( - pkg_type, self.model_or_sim.model_type - ) - package = package_obj(self, filename=child_path, child_builder_call=True) - assert hasattr(package, parameter_name) - - if isinstance(data, dict): - # order data correctly - dfn_name_dict = package._get_dfn_name_dict() - ordered_data_items = [] - for key, value in data.items(): - if key in dfn_name_dict: - ordered_data_items.append([dfn_name_dict[key], key, value]) - else: - ordered_data_items.append([999999, key, value]) - ordered_data_items = sorted(ordered_data_items, key=lambda x: x[0]) - - # evaluate and add data to package - unused_data = {} - for order, key, value in ordered_data_items: - # if key is an attribute of the child package - if isinstance(key, str) and hasattr(package, key): - # set child package attribute - child_data_attr = getattr(package, key) - if isinstance(child_data_attr, mfdatalist.MFList): - child_data_attr.set_data(value, autofill=True) - elif isinstance(child_data_attr, mfdata.MFData): - child_data_attr.set_data(value) - elif key == "fname" or key == "filename": - child_path = value - package._filename = value - else: - setattr(package, key, value) - else: - unused_data[key] = value - if unused_data: - setattr(package, parameter_name, unused_data) - else: - setattr(package, parameter_name, data) - - # append package to list - package_group.init_package(package, child_path) - return package - - def 
build_mfdata(self, var_name, data=None): - """Returns the appropriate data type object (mfdatalist, mfdataarray, - or mfdatascalar) given that object the appropriate structure (looked - up based on var_name) and any data supplied. This method is for - internal FloPy library use only. - - Parameters - ---------- - var_name : str - Variable name - - data : many supported types - Data contained in this object - - Returns - ------- - data object : MFData subclass - - """ - if self.loading_package: - data = None - for key, block in self.structure.blocks.items(): - if var_name in block.data_structures: - if block.name not in self.blocks: - self.blocks[block.name] = MFBlock( - self.simulation_data, - self.dimensions, - block, - self.path + (key,), - self.model_or_sim, - self, - ) - dataset_struct = block.data_structures[var_name] - var_path = self.path + (key, var_name) - ds = self.blocks[block.name].add_dataset(dataset_struct, data, var_path) - self._data_list.append(ds) - return ds - - message = 'Unable to find variable "{}" in package "{}".'.format( - var_name, self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "building data objects", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - def set_model_relative_path(self, model_ws): - """Sets the model path relative to the simulation's path. - - Parameters - ---------- - model_ws : str - Model path relative to the simulation's path. - - """ - # update blocks - for key, block in self.blocks.items(): - block.set_model_relative_path(model_ws) - # update sub-packages - for package in self._package_container.packagelist: - package.set_model_relative_path(model_ws) - - def set_all_data_external( - self, - check_data=True, - external_data_folder=None, - base_name=None, - binary=False, - ): - """Sets the package's list and array data to be stored externally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - external_data_folder - Folder where external data will be stored - base_name: str - Base file name prefix for all files - binary: bool - Whether file will be stored as binary - """ - # set blocks - for key, block in self.blocks.items(): - file_name = os.path.split(self.filename)[1] - if base_name is not None: - file_name = f"{base_name}_{file_name}" - block.set_all_data_external( - file_name, - check_data, - external_data_folder, - binary, - ) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_external( - check_data, - external_data_folder, - base_name, - binary, - ) - - def set_all_data_internal(self, check_data=True): - """Sets the package's list and array data to be stored internally. - - Parameters - ---------- - check_data : bool - Determine if data error checking is enabled - - """ - # set blocks - for key, block in self.blocks.items(): - block.set_all_data_internal(check_data) - # set sub-packages - for package in self._package_container.packagelist: - package.set_all_data_internal(check_data) - - def load(self, strict=True): - """Loads the package from file. - - Parameters - ---------- - strict : bool - Enforce strict checking of data. 
- - Returns - ------- - success : bool - - """ - # open file - try: - fd_input_file = open(datautil.clean_filename(self.get_file_path()), "r") - except OSError as e: - if e.errno == errno.ENOENT: - message = "File {} of type {} could not be opened.".format( - self.get_file_path(), self.package_type - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self.package_name, - self.path, - "loading package file", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - try: - self._load_blocks(fd_input_file, strict) - except ReadAsArraysException as err: - fd_input_file.close() - raise ReadAsArraysException(err) - # close file - fd_input_file.close() - - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # return validity of file - return self.is_valid() - - def is_valid(self): - """Returns whether or not this package is valid. - - Returns - ------- - is valid : bool - - """ - # Check blocks - for block in self.blocks.values(): - # Non-optional blocks must be enabled - if ( - block.structure.number_non_optional_data() > 0 - and not block.enabled - and block.is_allowed() - ): - self.last_error = ( - f'Required block "{block.block_header.name}" not enabled' - ) - return False - # Enabled blocks must be valid - if block.enabled and not block.is_valid: - self.last_error = f'Invalid block "{block.block_header.name}"' - return False - - return True - - def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): - # init - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)] = MFComment( - "", self.path, self.simulation_data - ) - self.post_block_comments = MFComment("", self.path, self.simulation_data) - - blocks_read = 0 - found_first_block = False - line = " " - while line != "": - line = fd_input_file.readline() - clean_line = line.strip() - # If comment or empty line - if MFComment.is_comment(clean_line, True): - self._store_comment(line, found_first_block) - elif len(clean_line) > 4 and clean_line[:5].upper() == "BEGIN": - # parse block header - try: - block_header_info = self._get_block_header_info(line, self.path) - except MFDataException as mfde: - message = ( - "An error occurred while loading block header " - 'in line "{}".'.format(line) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "loading block header", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - mfde, - ) - - # if there is more than one possible block with the same name, - # resolve the correct block to use - block_key = block_header_info.name.lower() - block_num = 1 - possible_key = f"{block_header_info.name.lower()}-{block_num}" - if possible_key in self.blocks: - block_key = possible_key - block_header_name = block_header_info.name.lower() - while ( - block_key in self.blocks - and not self.blocks[block_key].is_allowed() - ): - block_key = f"{block_header_name}-{block_num}" - block_num += 1 - - if block_key not in self.blocks: - # block name not recognized, load block as comments and - # issue a warning - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" is not a valid block ' - "name for file type " - "{}.".format(block_key, self.package_type) - ) - print(warning_str) - self._store_comment(line, found_first_block) - while line != "": - line = fd_input_file.readline() 
- self._store_comment(line, found_first_block) - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line and ( - len(arr_line[0]) <= 2 or arr_line[0][:3].upper() == "END" - ): - break - else: - found_first_block = True - skip_block = False - cur_block = self.blocks[block_key] - if cur_block.loaded: - # Only blocks defined as repeating are allowed to have - # multiple entries - header_name = block_header_info.name - if not self.structure.blocks[header_name.lower()].repeating(): - # warn and skip block - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.normal.value - ): - warning_str = ( - 'WARNING: Block "{}" has ' - "multiple entries and is not " - "intended to be a repeating " - "block ({} package" - ")".format(header_name, self.package_type) - ) - print(warning_str) - skip_block = True - bhs = cur_block.structure.block_header_structure - bhval = block_header_info.variable_strings - if len(bhs) > 0 and len(bhval) > 0 and bhs[0].name == "iper": - nper = self.simulation_data.mfdata[ - ("tdis", "dimensions", "nper") - ].get_data() - bhval_int = datautil.DatumUtil.is_int(bhval[0]) - if not bhval_int or int(bhval[0]) > nper: - # skip block when block stress period is greater - # than nper - skip_block = True - - if not skip_block: - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" loading block {cur_block.structure.name}...") - # reset comments - self.post_block_comments = MFComment( - "", self.path, self.simulation_data - ) - - cur_block.load(block_header_info, fd_input_file, strict) - - # write post block comment - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - blocks_read += 1 - if blocks_read >= max_blocks: - break - else: - # treat skipped block as if it is all comments - arr_line = datautil.PyListUtil.split_data_line(clean_line) - self.post_block_comments.add_text(str(line), True) - while arr_line and ( - len(line) <= 2 or arr_line[0][:3].upper() != "END" - ): - line = fd_input_file.readline() - arr_line = datautil.PyListUtil.split_data_line(line.strip()) - if arr_line: - self.post_block_comments.add_text(str(line), True) - self.simulation_data.mfdata[ - cur_block.block_headers[-1].blk_post_comment_path - ] = self.post_block_comments - - else: - if not ( - len(clean_line) == 0 - or (len(line) > 2 and line[:3].upper() == "END") - ): - # Record file location of beginning of unresolved text - # treat unresolved text as a comment for now - self._store_comment(line, found_first_block) - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """Writes the package to a file. - - Parameters - ---------- - ext_file_action : ExtFileAction - How to handle pathing of external data files. 
- """ - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # create any folders in path - package_file_path = self.get_file_path() - package_folder = os.path.split(package_file_path)[0] - if package_folder and not os.path.isdir(package_folder): - os.makedirs(os.path.split(package_file_path)[0]) - - # open file - fd = open(package_file_path, "w") - - # write flopy header - if self.simulation_data.write_headers: - dt = datetime.datetime.now() - header = "# File generated by Flopy version {} on {} at {}.\n".format( - __version__, - dt.strftime("%m/%d/%Y"), - dt.strftime("%H:%M:%S"), - ) - fd.write(header) - - # write blocks - self._write_blocks(fd, ext_file_action) - - fd.close() - - def create_package_dimensions(self): - """Creates a package dimensions object. For internal FloPy library - use. - - Returns - ------- - package dimensions : PackageDimensions - - """ - model_dims = None - if self.container_type[0] == PackageContainerType.model: - model_dims = [ - modeldimensions.ModelDimensions(self.path[0], self.simulation_data) - ] - else: - # this is a simulation file that does not correspond to a specific - # model. figure out which model to use and return a dimensions - # object for that model - if self.dfn_file_name[0:3] == "exg": - exchange_rec_array = self.simulation_data.mfdata[ - ("nam", "exchanges", "exchanges") - ].get_data() - if exchange_rec_array is None: - return None - for exchange in exchange_rec_array: - if exchange[1].lower() == self._filename.lower(): - model_dims = [ - modeldimensions.ModelDimensions( - exchange[2], self.simulation_data - ), - modeldimensions.ModelDimensions( - exchange[3], self.simulation_data - ), - ] - break - elif ( - self.dfn_file_name[4:7] == "gnc" - and self.model_or_sim.type == "Simulation" - ): - # get exchange file name associated with gnc package - if self.parent_file is not None: - exg_file_name = self.parent_file.filename - else: - raise Exception( - "Can not create a simulation-level " - "gnc file without a corresponding " - "exchange file. Exchange file must be " - "created first." - ) - # get models associated with exchange file from sim nam file - try: - exchange_recarray_data = ( - self.model_or_sim.name_file.exchanges.get_data() - ) - except MFDataException as mfde: - message = ( - "An error occurred while retrieving exchange " - "data from the simulation name file. The error " - "occurred while processing gnc file " - f'"{self.filename}".' 
- ) - raise MFDataException( - mfdata_except=mfde, - package=self._get_pname(), - message=message, - ) - assert exchange_recarray_data is not None - model_1 = None - model_2 = None - for exchange in exchange_recarray_data: - if exchange[1] == exg_file_name: - model_1 = exchange[2] - model_2 = exchange[3] - - # assign models to gnc package - model_dims = [ - modeldimensions.ModelDimensions(model_1, self.simulation_data), - modeldimensions.ModelDimensions(model_2, self.simulation_data), - ] - elif self.parent_file is not None: - model_dims = [] - for md in self.parent_file.dimensions.model_dim: - model_name = md.model_name - model_dims.append( - modeldimensions.ModelDimensions( - model_name, self.simulation_data - ) - ) - else: - model_dims = [ - modeldimensions.ModelDimensions(None, self.simulation_data) - ] - return modeldimensions.PackageDimensions(model_dims, self.structure, self.path) - - def _store_comment(self, line, found_first_block): - # Store comment - if found_first_block: - self.post_block_comments.text += line - else: - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)].text += line - - def _write_blocks(self, fd, ext_file_action): - # verify that all blocks are valid - if not self.is_valid(): - message = ( - 'Unable to write out model file "{}" due to the ' - "following error: " - "{} ({})".format(self._filename, self.last_error, self.path) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.model_name, - self._get_pname(), - self.path, - "writing package blocks", - None, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self.simulation_data.debug, - ) - - # write initial comments - pkg_hdr_comments_path = self.path + ("pkg_hdr_comments",) - if pkg_hdr_comments_path in self.simulation_data.mfdata: - self.simulation_data.mfdata[self.path + ("pkg_hdr_comments",)].write( - fd, False - ) - - # loop through blocks - block_num = 1 - for block in self.blocks.values(): - if ( - self.simulation_data.verbosity_level.value - >= VerbosityLevel.verbose.value - ): - print(f" writing block {block.structure.name}...") - # write block - block.write(fd, ext_file_action=ext_file_action) - block_num += 1 - - def get_file_path(self): - """Returns the package file's path. - - Returns - ------- - file path : str - """ - if self.path[0] in self.simulation_data.mfpath.model_relative_path: - return os.path.join( - self.simulation_data.mfpath.get_model_path(self.path[0]), - self._filename, - ) - else: - return os.path.join( - self.simulation_data.mfpath.get_sim_path(), self._filename - ) - - def export(self, f, **kwargs): - """ - Method to export a package to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - Filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - User supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from .. import export - - return export.utils.package_export(f, self, **kwargs) - - def plot(self, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate - file names for output image files. Plots will be exported as - image files if file_name_base is not None. 
(default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only - used if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - - """ - from ..plot.plotutil import PlotUtilities - - if not self.plottable: - raise TypeError("Simulation level packages are not plottable") - - axes = PlotUtilities._plot_package_helper(self, **kwargs) - return axes - - -class MFChildPackages: - """ - Behind the scenes code for creating an interface to access child packages - from a parent package. This class is automatically constructed by the - FloPy library and is for internal library use only. - - Parameters - ---------- - """ - - def __init__( - self, - model_or_sim, - parent, - pkg_type, - filerecord, - package=None, - package_class=None, - ): - self._packages = [] - self._filerecord = filerecord - if package is not None: - self._packages.append(package) - self._model_or_sim = model_or_sim - self._cpparent = parent - self._pkg_type = pkg_type - self._package_class = package_class - - def __init_subclass__(cls): - """Register package""" - super().__init_subclass__() - PackageContainer.packages_by_abbr[cls.package_abbr] = cls - - def __getattr__(self, attr): - if ( - "_packages" in self.__dict__ - and len(self._packages) > 0 - and hasattr(self._packages[0], attr) - ): - item = getattr(self._packages[0], attr) - return item - raise AttributeError(attr) - - def __getitem__(self, k): - if isinstance(k, int): - if k < len(self._packages): - return self._packages[k] - raise ValueError(f"Package index {k} does not exist.") - - def __setattr__(self, key, value): - if ( - key != "_packages" - and key != "_model_or_sim" - and key != "_cpparent" - and key != "_inattr" - and key != "_filerecord" - and key != "_package_class" - and key != "_pkg_type" - ): - if len(self._packages) == 0: - raise Exception( - "No {} package is currently attached to package" - " {}. 
Use the initialize method to create a(n) " - "{} package before attempting to access its " - "properties.".format( - self._pkg_type, self._cpparent.filename, self._pkg_type - ) - ) - package = self._packages[0] - setattr(package, key, value) - return - super().__setattr__(key, value) - - def __default_file_path_base(self, file_path, suffix=""): - stem = os.path.split(file_path)[1] - stem_lst = stem.split(".") - file_name = ".".join(stem_lst[:-1]) - if len(stem_lst) > 1: - file_ext = stem_lst[-1] - return f"{file_name}.{file_ext}{suffix}.{self._pkg_type}" - elif suffix != "": - return f"{stem}.{self._pkg_type}" - else: - return f"{stem}.{suffix}.{self._pkg_type}" - - def __file_path_taken(self, possible_path): - for package in self._packages: - # Do case insensitive compare - if package.filename.lower() == possible_path.lower(): - return True - return False - - def next_default_file_path(self): - possible_path = self.__default_file_path_base(self._cpparent.filename) - suffix = 0 - while self.__file_path_taken(possible_path): - possible_path = self.__default_file_path_base( - self._cpparent.filename, suffix - ) - suffix += 1 - return possible_path - - def init_package(self, package, fname, remove_packages=True): - if remove_packages: - # clear out existing packages - self._remove_packages() - elif fname is not None: - self._remove_packages(fname) - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - # check file record variable - found = False - fr_data = self._filerecord.get_data() - if fr_data is not None: - for line in fr_data: - if line[0] == fname: - found = True - if not found: - # append file record variable - self._filerecord.append_data([(fname,)]) - # add the package to the list - self._packages.append(package) - - def _update_filename(self, old_fname, new_fname): - file_record = self._filerecord.get_data() - new_file_record_data = [] - if file_record is not None: - file_record_data = file_record[0] - for item in file_record_data: - base, fname = os.path.split(item) - if fname.lower() == old_fname.lower(): - if base: - new_file_record_data.append((os.path.join(base, new_fname),)) - else: - new_file_record_data.append((new_fname,)) - else: - new_file_record_data.append((item,)) - else: - new_file_record_data.append((new_fname,)) - self._filerecord.set_data(new_file_record_data) - - def _append_package(self, package, fname, update_frecord=True): - if fname is None: - # build a file name - fname = self.next_default_file_path() - package._filename = fname - - if update_frecord: - # set file record variable - file_record = self._filerecord.get_data() - file_record_data = file_record - new_file_record_data = [] - for item in file_record_data: - new_file_record_data.append((item[0],)) - new_file_record_data.append((fname,)) - self._filerecord.set_data(new_file_record_data) - - for existing_pkg in self._packages: - if existing_pkg is package: - # do not add the same package twice - return - # add the package to the list - self._packages.append(package) - - def _remove_packages(self, fname=None, only_pop_from_list=False): - rp_list = [] - for idx, package in enumerate(self._packages): - if fname is None or package.filename == fname: - if not only_pop_from_list: - self._model_or_sim.remove_package(package) - rp_list.append(idx) - for idx in reversed(rp_list): - self._packages.pop(idx) From dd6bcae5d442cac61a88a7c23d9febd32edeccc2 Mon Sep 17 00:00:00 2001 From: mjreno Date: Tue, 12 Aug 2025 17:10:26 -0400 Subject: [PATCH 11/44] fix 
structured dataset --- flopy/discretization/structuredgrid.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 787dc965cd..001137190b 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1876,6 +1876,8 @@ def _layered_mesh_dataset(self, ds, modeltime=None): ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + return ds + def _structured_dataset(self, ds, modeltime=None): lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} @@ -1933,6 +1935,8 @@ def _structured_dataset(self, ds, modeltime=None): ds["x"].attrs["long_name"] = "Easting" ds["x"].attrs["bounds"] = "x_bnds" + return ds + def _set_structured_iverts(self): """ Build a list of the vertices that define each model cell and the x, y From 7f54d0b5cd7eec9e18c5faaf2b9d09b44fc77690 Mon Sep 17 00:00:00 2001 From: mjreno Date: Wed, 13 Aug 2025 15:29:12 -0400 Subject: [PATCH 12/44] add update_dataset functions --- .docs/Notebooks/netcdf01_tutorial.py | 157 ++++++++++++--------------- .docs/Notebooks/netcdf02_tutorial.py | 24 ++-- flopy/mf6/mfmodel.py | 11 ++ flopy/mf6/mfpackage.py | 46 ++++++++ flopy/mf6/utils/codegen/filters.py | 17 ++- 5 files changed, 151 insertions(+), 104 deletions(-) diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index cd138fd3f3..a606fcd6f9 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -242,44 +242,12 @@ def create_sim(ws): return sim -# ## Create helper function to update dataset -# -# This function updates an xarray dataset to add variables described -# in a FloPy provided dictionary. -# -# The dimmap variable relates NetCDF dimension names to a value. - - -# A subroutine that can update an xarray dataset with package -# netcdf information stored in a dict -def add_netcdf_vars(dataset, nc_info, dimmap): - def _data_shape(shape): - dims_l = [] - for d in shape: - dims_l.append(dimmap[d]) - - return dims_l - - for v in nc_info: - varname = nc_info[v]["varname"] - data = np.full( - _data_shape(nc_info[v]["netcdf_shape"]), - nc_info[v]["attrs"]["_FillValue"], - dtype=nc_info[v]["xarray_type"], - ) - var_d = {varname: (nc_info[v]["netcdf_shape"], data)} - dataset = dataset.assign(var_d) - for a in nc_info[v]["attrs"]: - dataset[varname].attrs[a] = nc_info[v]["attrs"][a] - - return dataset - - # ## Create simulation workspace # create temporary directories -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) +# temp_dir = TemporaryDirectory() +# workspace = Path(temp_dir.name) +workspace = Path("./working") # ## Write and run baseline simulation @@ -323,49 +291,39 @@ def _data_shape(shape): # First, retrieve and store the netcdf info dictionary and display # its contents. Then, in the following step, update the dataset with # the model scoped attributes defined in the dictionary. +# +# These 2 operations can also be accomplised by calling `update_dataset()` +# on the model object. Analogous functions for the package are shown +# below. 
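+#
+# As a minimal sketch, the one-call form collapses the two steps shown
+# below into a single line (assuming the model-level `update_dataset()`
+# method added by this patch, which copies the same attributes and
+# returns the dataset):
+#
+#     ds = gwf.update_dataset(ds)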
 # get model netcdf info
 nc_info = gwf.netcdf_info()
 pprint(nc_info)

-# update dataset with required attributes
+# update dataset directly with required attributes
 for a in nc_info["attrs"]:
     ds.attrs[a] = nc_info["attrs"][a]

-# ## Map dataset dimension names to values
-
-# define dimensional info
-dimmap = {
-    "time": sum(gwf.modeltime.nstp),
-    "z": gwf.modelgrid.nlay,
-    "y": gwf.modelgrid.nrow,
-    "x": gwf.modelgrid.ncol,
-}
-
-# ## Access package NetCDF attributes
-#
-# Access package scoped NetCDF details by storing the dictionary returned
-# from `netcdf_info()`. We need to set package variable attributes that are
-# stored in the package netcdf info dict, but we also need other information
-# that is relevant to creating the variables themselves.
-#
-# The contents of the info dictionary are shown and then, in the following
-# step, the dictionary and the dataset are passed to a helper routine that
-# create the intended array variables.
+# ## Update the dataset with supported `DIS` arrays
+#
+# Add NetCDF supported data arrays in the package to the dataset. Internally, this call
+# uses a `netcdf_info()` package dictionary to determine candidate variables
+# and relevant information about them. Alternatively, this dictionary can
+# be directly accessed, updated, and passed to the `update_dataset()` function.
+# That workflow will be demonstrated in the `NPF` package update which follows.

-# get dis package netcdf info
+# update dataset with `DIS` arrays
 dis = gwf.get_package("dis")
-nc_info = dis.netcdf_info()
-pprint(nc_info)
-
-# create dis dataset variables
-ds = add_netcdf_vars(ds, nc_info, dimmap)
+ds = dis.update_dataset(ds)

 # ## Update array data
 #
 # We have created dataset array variables for the package but they do not yet
 # define the expected input data for MODFLOW 6. We will take advantage of the
 # existing simulation objects and update the dataset.
+#
+# Default dataset variable names are defined in the package `netcdf_info()`
+# dictionary.

 # update dataset from dis arrays
 ds["dis_delr"].values = dis.delr.get_data()
@@ -378,7 +336,7 @@ def _data_shape(shape):
 #
 # MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF
 # file is generated, we need to configure MODFLOW 6 so that it looks to that
-# file for the package array input. The ASCII file will no longer defined the
+# file for the package array input. The ASCII file will no longer define the
 # arrays- instead the array names will be followed by the NETCDF keyword.
 #
 # We will simply overwrite the entire MODFLOW 6 `DIS` package input file with the
@@ -404,21 +362,46 @@ def _data_shape(shape):
 with open(workspace / "netcdf" / "uzf01.dis", "r") as fh:
     print(fh.read())

-# ## Update MODFLOW 6 package input file
+# ## Access `NPF` package NetCDF attributes
 #
-# Follow the same process as above for the `NPF` package.
+# Access package scoped NetCDF details by storing the dictionary returned
+# from `netcdf_info()`. We need to set package variable attributes that are
+# stored in the package netcdf info dict, but we also need other information
+# that is relevant to creating the variables themselves.
+#
+# The contents of the info dictionary are shown and then, in the following
+# step, the dictionary and the dataset are passed to a helper routine that
+# creates the intended array variables.
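+#
+# Each entry of the info dictionary describes one candidate array. As a
+# sketch (keys as used elsewhere in this tutorial), the `K` entry of the
+# `NPF` dictionary retrieved below carries the default dataset variable
+# name along with shape, type, and attribute information:
+#
+#     nc_info["k"]["varname"]       # default dataset name, e.g. "npf_k"
+#     nc_info["k"]["netcdf_shape"]  # NetCDF dimension names for the array
+#     nc_info["k"]["attrs"]         # variable attributes, e.g. _FillValue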
 # get npf package netcdf info
 npf = gwf.get_package("npf")
 nc_info = npf.netcdf_info()
 pprint(nc_info)

-# create npf dataset variables
-ds = add_netcdf_vars(ds, nc_info, dimmap)
+# ## Update package `netcdf_info` dictionary and dataset
+#
+# Here we replace the default name for the `NPF K` input parameter and add
+# the `standard_name` attribute to its attribute dictionary. The dictionary
+# is then passed to the `update_dataset()` function. Note the udpated name
+# is used in the subsequent block when updating the array values.
+
+# update dataset with `NPF` arrays
+nc_info["k"]["varname"] = "npf_k_updated"
+nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation"
+ds = npf.update_dataset(ds, netcdf_info=nc_info)
+
+# ## Update array data

 # update dataset from npf arrays
 ds["npf_icelltype"].values = npf.icelltype.get_data()
-ds["npf_k"].values = npf.k.get_data()
+ds["npf_k_updated"].values = npf.k.get_data()
+
+# ## Show dataset `NPF K` parameter with udpates
+
+# print dataset npf k variable
+print(ds["npf_k_updated"])
+
+# ## Update MODFLOW 6 package input file

 # rewrite mf6 npf input to read from netcdf
 with open(workspace / "netcdf" / "uzf01.npf", "w") as f:
@@ -431,21 +414,13 @@ def _data_shape(shape):
 with open(workspace / "netcdf" / "uzf01.npf", "r") as fh:
     print(fh.read())

-# ## Update MODFLOW 6 package input file
-#
-# Follow the same process as above for the `GHBG` package. The difference is
-# that this is PERIOD input and therefore stored as timeseries data in the
-# NetCDF file. As NETCDF timeseries are defined in terms of total number of
-# simulation steps, care must be taken in the translation of FloPy period
-# data to the timeseries.
+# ## Update the dataset with supported `GHBG` arrays

-# get ghbg package netcdf info
+# update dataset with 'GHBG' arrays
 ghbg = gwf.get_package("ghbg_0")
-nc_info = ghbg.netcdf_info()
-pprint(nc_info)
+ds = ghbg.update_dataset(ds)

-# create ghbg dataset variables
-ds = add_netcdf_vars(ds, nc_info, dimmap)
+# ## Update array data

 # update bhead netcdf array from flopy perioddata
 # timeseries step index is first of stress period
@@ -459,17 +434,7 @@ def _data_shape(shape):
     istp = sum(gwf.modeltime.nstp[0:p])
     ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p]

-# ## Display generated dataset
-
-# show the dataset
-print(ds)
-
-# ## Export generated dataset to NetCDF
-
-# write dataset to netcdf
-ds.to_netcdf(
-    workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4"
-)
+# ## Update MODFLOW 6 package input file

 # rewrite mf6 ghbg input to read from netcdf
 with open(workspace / "netcdf/uzf01.ghbg", "w") as f:
@@ -486,6 +451,18 @@ def _data_shape(shape):
 with open(workspace / "netcdf" / "uzf01.ghbg", "r") as fh:
     print(fh.read())

+# ## Display generated dataset
+
+# show the dataset
+print(ds)
+
+# ## Export generated dataset to NetCDF
+
+# write dataset to netcdf
+ds.to_netcdf(
+    workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4"
+)
+
 # ## Run MODFLOW 6 simulation with NetCDF input
 #
 # The simulation generated by this tutorial should be runnable by
diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py
index 68caae2892..981323f7aa 100644
--- a/.docs/Notebooks/netcdf02_tutorial.py
+++ b/.docs/Notebooks/netcdf02_tutorial.py
@@ -12,8 +12,6 @@
 #     display_name: Python 3 (ipykernel)
 #     language: python
 #     name: python3
-#     metadata:
-#       section: mf6
 # ---

 # # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim
@@ -165,7 +163,7 @@ def create_sim(ws):
         uzf_spd.update({t: spd})

-    # Work up the GHB / GHBG boundary
+    # Work up the GHBG boundary
     ghb_ids = [(ncol - 1) + i * ncol for i in range(nrow)]
     abhead = np.full((nlay, ncpl), DNODATA, dtype=float)
     acond = np.full((nlay, ncpl), DNODATA, dtype=float)
@@ -464,16 +462,6 @@ def _data_shape(shape):
             l
         ].flatten()

-# ## Display generated dataset
-
-# show the dataset
-print(ds)
-
-# ## Export generated dataset to NetCDF
-
-# write dataset to netcdf
-ds.to_netcdf(workspace / "netcdf/uzf02.layered.nc", format="NETCDF4", engine="netcdf4")
-
 # rewrite mf6 ghbg input to read from netcdf
 with open(workspace / "netcdf/uzf02.ghbg", "w") as f:
     f.write("BEGIN options\n")
@@ -487,6 +475,16 @@ def _data_shape(shape):
 with open(workspace / "netcdf" / "uzf02.ghbg", "r") as fh:
     print(fh.read())

+# ## Display generated dataset
+
+# show the dataset
+print(ds)
+
+# ## Export generated dataset to NetCDF
+
+# write dataset to netcdf
+ds.to_netcdf(workspace / "netcdf/uzf02.layered.nc", format="NETCDF4", engine="netcdf4")
+
 # ## Run MODFLOW 6 simulation with NetCDF input
 #
 # The simulation generated by this tutorial should be runnable by
diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py
index 621887121c..3cd301ecef 100644
--- a/flopy/mf6/mfmodel.py
+++ b/flopy/mf6/mfmodel.py
@@ -2236,3 +2236,14 @@ def netcdf_info(self, mesh=None):
         return MFModel.netcdf_model(
             self.name, self.model_type, self.get_grid_type(), mesh
         )
+
+    def update_dataset(self, dataset, netcdf_info=None, mesh=None):
+        if netcdf_info is None:
+            nc_info = self.netcdf_info(mesh=mesh)
+        else:
+            nc_info = netcdf_info
+
+        for a in nc_info["attrs"]:
+            dataset.attrs[a] = nc_info["attrs"][a]
+
+        return dataset
diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py
index 746b4a3ddd..828ff8e640 100644
--- a/flopy/mf6/mfpackage.py
+++ b/flopy/mf6/mfpackage.py
@@ -3507,6 +3507,8 @@ def _add_entry(tagname, iaux=None, layer=None):
                     a["attrs"]["_FillValue"] = FILLNA_DBL
                 elif data_item.block_name == "period":
                     a["attrs"]["_FillValue"] = DNODATA
+            if data_item.longname is not None:
+                a["attrs"]["longname"] = data_item.longname

             # set dictionary
             attrs[key] = a
@@ -3595,6 +3597,50 @@ def netcdf_info(self, mesh=None):

         return attrs

+    def update_dataset(self, dataset, netcdf_info=None, mesh=None):
+        if netcdf_info is None:
+            nc_info = self.netcdf_info(mesh=mesh)
+        else:
+            nc_info = netcdf_info
+
+        modelgrid = self.model_or_sim.modelgrid
+        modeltime = self.model_or_sim.modeltime
+
+        if mesh is None:
+            dimmap = {
+                "time": sum(modeltime.nstp),
+                "z": modelgrid.nlay,
+                "y": modelgrid.nrow,
+                "x": modelgrid.ncol,
+            }
+        elif mesh.upper() == "LAYERED":
+            dimmap = {
+                "time": sum(modeltime.nstp),
+                "z": modelgrid.nlay,
+                "nmesh_face": modelgrid.ncpl,
+            }
+
+        def _data_shape(shape):
+            dims_l = []
+            for d in shape:
+                dims_l.append(dimmap[d])
+
+            return dims_l
+
+        for v in nc_info:
+            varname = nc_info[v]["varname"]
+            data = np.full(
+                _data_shape(nc_info[v]["netcdf_shape"]),
+                nc_info[v]["attrs"]["_FillValue"],
+                dtype=nc_info[v]["xarray_type"],
+            )
+            var_d = {varname: (nc_info[v]["netcdf_shape"], data)}
+            dataset = dataset.assign(var_d)
+            for a in nc_info[v]["attrs"]:
+                dataset[varname].attrs[a] = nc_info[v]["attrs"][a]
+
+        return dataset
+

 class MFChildPackages:
     """
diff --git a/flopy/mf6/utils/codegen/filters.py b/flopy/mf6/utils/codegen/filters.py
index 5851b46040..e63fb8d117 100644
--- a/flopy/mf6/utils/codegen/filters.py
+++ b/flopy/mf6/utils/codegen/filters.py
@@ -333,7 +333,22 @@ def _var(var: dict) -> List[str]:
                 if k not in
exclude ] - return [_var(var) for var in list(definition.values(multi=True))] + def __dfn(): + def _var(var: dict) -> List[str]: + exclude = ["description"] + name = var["name"] + subpkg = dfn.get("fkeys", dict()).get(name, None) + if subpkg: + var["construct_package"] = subpkg["abbr"] + var["construct_data"] = subpkg["val"] + var["parameter_name"] = subpkg["param"] + return [ + " ".join([k, v]).strip() + for k, v in var.items() + if k not in exclude + ] + + return [_var(var) for var in list(definition.values(multi=True))] return [["header"] + _meta()] + __dfn() From b5b68fe6daaf97d3d7a193d6cdca4ddb56a93849 Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 14 Aug 2025 10:16:25 -0400 Subject: [PATCH 13/44] limited flopy mf6 write support for netcdf --- .docs/Notebooks/netcdf01_tutorial.py | 348 +++++-------------- examples/data/mf6/netcdf/uzf01/mfsim.nam | 19 + examples/data/mf6/netcdf/uzf01/uzf01.dis | 322 +++++++++++++++++ examples/data/mf6/netcdf/uzf01/uzf01.ghb.obs | 10 + examples/data/mf6/netcdf/uzf01/uzf01.ghbg | 213 ++++++++++++ examples/data/mf6/netcdf/uzf01/uzf01.ic | 9 + examples/data/mf6/netcdf/uzf01/uzf01.ims | 22 ++ examples/data/mf6/netcdf/uzf01/uzf01.nam | 17 + examples/data/mf6/netcdf/uzf01/uzf01.npf | 11 + examples/data/mf6/netcdf/uzf01/uzf01.obs | 10 + examples/data/mf6/netcdf/uzf01/uzf01.oc | 14 + examples/data/mf6/netcdf/uzf01/uzf01.sto | 17 + examples/data/mf6/netcdf/uzf01/uzf01.tdis | 13 + examples/data/mf6/netcdf/uzf01/uzf01.uzf | 123 +++++++ examples/data/mf6/netcdf/uzf01/uzf01.uzf.obs | 13 + flopy/mf6/data/mfdataarray.py | 35 +- flopy/mf6/data/mfdatastorage.py | 11 +- flopy/mf6/data/mfstructure.py | 39 ++- flopy/mf6/mfbase.py | 6 + flopy/mf6/mfmodel.py | 50 ++- flopy/mf6/mfpackage.py | 55 +++ flopy/mf6/mfsimbase.py | 13 +- 22 files changed, 1076 insertions(+), 294 deletions(-) create mode 100644 examples/data/mf6/netcdf/uzf01/mfsim.nam create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.dis create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.ghb.obs create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.ghbg create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.ic create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.ims create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.nam create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.npf create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.obs create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.oc create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.sto create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.tdis create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.uzf create mode 100644 examples/data/mf6/netcdf/uzf01/uzf01.uzf.obs diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index a606fcd6f9..c5037568e5 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -12,8 +12,6 @@ # display_name: Python 3 (ipykernel) # language: python # name: python3 -# metadata: -# section: mf6 # --- # # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim @@ -39,7 +37,9 @@ from pprint import pformat, pprint from tempfile import TemporaryDirectory +import git import numpy as np +import pooch import xarray as xr import flopy @@ -47,212 +47,60 @@ print(sys.version) print(f"flopy version: {flopy.__version__}") -# ## Define `DNODATA` constant -# -# `DNODATA` is an important constant for MODFLOW 6 timeseries grid input -# data. It signifies that the cell has no data defined for the time step -# in question. 
These cell values are discarded and have no impact on the -# simulation. +sim_name = "uzf01" + +# Check if we are in the repository and define the data path. + +try: + root = Path(git.Repo(".", search_parent_directories=True).working_dir) +except: + root = None + +data_path = root / "examples" / "data" / "mf6" / "netcdf" if root else Path.cwd() +print(data_path) + +file_names = { + "mfsim.nam": None, + "uzf01.dis": None, + "uzf01.ghb.obs": None, + "uzf01.ghbg": None, + "uzf01.ic": None, + "uzf01.ims": None, + "uzf01.nam": None, + "uzf01.npf": None, + "uzf01.obs": None, + "uzf01.oc": None, + "uzf01.sto": None, + "uzf01.tdis": None, + "uzf01.uzf": None, + "uzf01.uzf.obs": None, +} + +# for fname, fhash in file_names.items(): +# pooch.retrieve( +# url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}", +# fname=fname, +# path=data_path / sim_name, +# known_hash=fhash, +# ) + +# ## Create simulation workspace -# DNODATA constant -DNODATA = 3.0e30 +# create temporary directories +temp_dir = TemporaryDirectory() +workspace = Path(temp_dir.name) -# ## Define ASCII input baseline simulation +# ## Load and run baseline simulation # # For the purposes of this tutorial, the specifics of this simulation # other than it is a candidate for NetCDF input are not a focus. It -# is a NetCDF input candidate because it defines a candidate model type +# is a NetCDF input candidate because it defines a supported model type # (`GWF6`) with a structured discretization and packages that support # NetCDF input parameters. -# -# A NetCDF dataset will be created from array data in the `DIS`, `NPF` and -# `GHBG` packages. Data will be copied from the package objects into dataset -# arrays. - - -# A FloPy ASCII base simulation that will be updated use netcdf inputs -def create_sim(ws): - name = "uzf01" - perlen = [500.0] - nper = len(perlen) - nstp = [10] - tsmult = nper * [1.0] - crs = "EPSG:26916" - nlay, nrow, ncol = 100, 1, 1 - delr = 1.0 - delc = 1.0 - delv = 1.0 - top = 100.0 - botm = [top - (k + 1) * delv for k in range(nlay)] - strt = 0.5 - hk = 1.0 - laytyp = 1 - ss = 0.0 - sy = 0.1 - - tdis_rc = [] - for i in range(nper): - tdis_rc.append((perlen[i], nstp[i], tsmult[i])) - - # build MODFLOW 6 files - sim = flopy.mf6.MFSimulation( - sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws - ) - - # create tdis package - tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) - - # create iterative model solution and register the gwf model with it - nouter, ninner = 100, 10 - hclose, rclose, relax = 1.5e-6, 1e-6, 0.97 - imsgwf = flopy.mf6.ModflowIms( - sim, - print_option="SUMMARY", - outer_dvclose=hclose, - outer_maximum=nouter, - under_relaxation="DBD", - under_relaxation_theta=0.7, - inner_maximum=ninner, - inner_dvclose=hclose, - rcloserecord=rclose, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=relax, - ) - - # create gwf model - newtonoptions = "NEWTON UNDER_RELAXATION" - gwf = flopy.mf6.ModflowGwf( - sim, - modelname=name, - newtonoptions=newtonoptions, - save_flows=True, - ) - - dis = flopy.mf6.ModflowGwfdis( - gwf, - crs=crs, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - idomain=np.ones((nlay, nrow, ncol), dtype=int), - ) - - # initial conditions - ic = flopy.mf6.ModflowGwfic(gwf, strt=strt) - - # node property flow - npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp, k=hk) - # storage - sto = flopy.mf6.ModflowGwfsto( - 
gwf, - save_flows=False, - iconvert=laytyp, - ss=ss, - sy=sy, - steady_state={0: False}, - transient={0: True}, - ) - - # ghbg - ghb_obs = {f"{name}.ghb.obs.csv": [("100_1_1", "GHB", (99, 0, 0))]} - bhead = np.full(nlay * nrow * ncol, DNODATA, dtype=float) - cond = np.full(nlay * nrow * ncol, DNODATA, dtype=float) - bhead[nlay - 1] = 1.5 - cond[nlay - 1] = 1.0 - ghb = flopy.mf6.ModflowGwfghbg( - gwf, - print_input=True, - print_flows=True, - bhead=bhead, - cond=cond, - save_flows=False, - ) - - ghb.obs.initialize( - filename=f"{name}.ghb.obs", - digits=20, - print_input=True, - continuous=ghb_obs, - ) - - # note: for specifying lake number, use fortran indexing! - uzf_obs = { - f"{name}.uzf.obs.csv": [ - ("wc 02", "water-content", 2, 0.5), - ("wc 50", "water-content", 50, 0.5), - ("wcbn 02", "water-content", "uzf 002", 0.5), - ("wcbn 50", "water-content", "UZF 050", 0.5), - ("rch 02", "uzf-gwrch", "uzf 002"), - ("rch 50", "uzf-gwrch", "uzf 050"), - ] - } - - sd = 0.1 - vks = hk - thtr = 0.05 - thti = thtr - thts = sy - eps = 4 - uzf_pkdat = [[0, (0, 0, 0), 1, 1, sd, vks, thtr, thts, thti, eps, "uzf 001"]] + [ - [k, (k, 0, 0), 0, k + 1, sd, vks, thtr, thts, thti, eps, f"uzf {k + 1:03d}"] - for k in range(1, nlay - 1) - ] - uzf_pkdat[-1][3] = -1 - infiltration = 2.01 - uzf_spd = {0: [[0, infiltration, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]} - uzf = flopy.mf6.ModflowGwfuzf( - gwf, - print_input=True, - print_flows=True, - save_flows=True, - boundnames=True, - ntrailwaves=15, - nwavesets=40, - nuzfcells=len(uzf_pkdat), - packagedata=uzf_pkdat, - perioddata=uzf_spd, - budget_filerecord=f"{name}.uzf.bud", - budgetcsv_filerecord=f"{name}.uzf.bud.csv", - observations=uzf_obs, - filename=f"{name}.uzf", - ) - - # output control - oc = flopy.mf6.ModflowGwfoc( - gwf, - budget_filerecord=f"{name}.bud", - head_filerecord=f"{name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "LAST"), ("BUDGET", "ALL")], - ) - - obs_lst = [] - obs_lst.append(["obs1", "head", (0, 0, 0)]) - obs_lst.append(["obs2", "head", (1, 0, 0)]) - obs_dict = {f"{name}.obs.csv": obs_lst} - obs = flopy.mf6.ModflowUtlobs(gwf, pname="head_obs", digits=20, continuous=obs_dict) - - return sim - - -# ## Create simulation workspace -# create temporary directories -# temp_dir = TemporaryDirectory() -# workspace = Path(temp_dir.name) -workspace = Path("./working") - -# ## Write and run baseline simulation - -# run the non-netcdf simulation -sim = create_sim(ws=workspace) +# load and run the non-netcdf simulation +sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name) +sim.set_sim_path(workspace) sim.write_simulation() success, buff = sim.run_simulation(silent=True, report=True) assert success, pformat(buff) @@ -262,15 +110,29 @@ def create_sim(ws): # Reset the simulation path and set the `GWF` name file `nc_filerecord` # attribute to the name of the intended input NetCDF file. Display # the resultant name file changes. +# +# When we write the updated simulation, all packages that support NetCDF +# input parameters will be converted. We will therefore need to create a +# NetCDF input file containing arrays for the `DIS`, `NPF`, `IC`, `STO`, +# and `GHBG` packages. Data will be copied from the package objects into +# dataset arrays. +# +# Flopy does not currently generate the NetCDF input file. This tutorial +# shows one way that can be accomplished. 
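+#
+# A quick way to preview which packages will be converted is to check
+# which ones report NetCDF candidate arrays once the model object has
+# been retrieved below (a sketch; it assumes the package `netcdf_info()`
+# method used throughout this tutorial and the model's `packagelist`
+# attribute):
+#
+#     for pkg in gwf.packagelist:
+#         if pkg.netcdf_info():
+#             print(pkg.package_type)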
 # create directory for netcdf sim
-# set model name file nc_filerecord attribute to export name
 sim.set_sim_path(workspace / "netcdf")
+# set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf01")
 gwf.name_file.nc_filerecord = "uzf01.structured.nc"
-sim.write_simulation()
+# write simulation with ASCII inputs tagged for NetCDF
+sim.write_simulation(netcdf=True)
+# show name file with NetCDF input configured
 with open(workspace / "netcdf" / "uzf01.nam", "r") as fh:
     print(fh.read())
+# show example package file with NetCDF input configured
+with open(workspace / "netcdf" / "uzf01.ic", "r") as fh:
+    print(fh.read())
 
 # ## Create dataset
 #
@@ -292,7 +154,7 @@ def create_sim(ws):
 # its contents. Then, in the following step, update the dataset with
 # the model scoped attributes defined in the dictionary.
 #
-# These 2 operations can also be accomplised by calling `update_dataset()`
+# These 2 operations can also be accomplished by calling `update_dataset()`
 # on the model object. Analogous functions for the package are shown
 # below.
 
@@ -332,36 +194,6 @@ def create_sim(ws):
 ds["dis_botm"].values = dis.botm.get_data()
 ds["dis_idomain"].values = dis.idomain.get_data()
 
-# ## Update MODFLOW 6 package input file
-#
-# MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF
-# file is generated, we need to configure MODFLOW 6 so that it looks to that
-# file for the package array input. The ASCII file will no longer define the
-# arrays- instead the array names will be followed by the NETCDF keyword.
-#
-# We will simply overwrite the entire MODFLOW 6 `DIS` package input file with the
-# following code block.
-
-# rewrite mf6 dis input to read from netcdf
-with open(workspace / "netcdf" / "uzf01.dis", "w") as f:
-    f.write("BEGIN options\n")
-    f.write("  crs EPSG:26916\n")
-    f.write("END options\n\n")
-    f.write("BEGIN dimensions\n")
-    f.write("  NLAY 100\n")
-    f.write("  NROW 1\n")
-    f.write("  NCOL 1\n")
-    f.write("END dimensions\n\n")
-    f.write("BEGIN griddata\n")
-    f.write("  delr NETCDF\n")
-    f.write("  delc NETCDF\n")
-    f.write("  top NETCDF\n")
-    f.write("  botm NETCDF\n")
-    f.write("  idomain NETCDF\n")
-    f.write("END griddata\n")
-with open(workspace / "netcdf" / "uzf01.dis", "r") as fh:
-    print(fh.read())
-
 # ## Access `NPF` package NetCDF attributes
 #
 # Access package scoped NetCDF details by storing the dictionary returned
 # from `netcdf_info()`. We need to set package variable attributes that are
@@ -382,7 +214,7 @@ def create_sim(ws):
 #
 # Here we replace the default name for the `NPF K` input parameter and add
 # the `standard_name` attribute to its attribute dictionary. The dictionary
-# is then passed to the `update_dataset()` function. Note the udpated name
+# is then passed to the `update_dataset()` function. Note the updated name
 # is used in the subsequent block when updating the array values.
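The rename itself happens on unchanged lines elided by these hunks; a sketch
of that step, assuming the `netcdf_info()` dictionary for `NPF K` is keyed on
"k" and carries the "varname" and "attrs" entries shown in these tutorials:

# illustrative only: rename the NPF K variable and attach a CF standard_name
# before the dataset variables are built from the updated dictionary
nc_info["k"]["varname"] = "npf_k_updated"
nc_info["k"]["attrs"]["standard_name"] = (
    "soil_hydraulic_conductivity_at_saturation"
)
ds = npf.update_dataset(ds, netcdf_info=nc_info)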
# update dataset with `NPF` arrays @@ -396,23 +228,26 @@ def create_sim(ws): ds["npf_icelltype"].values = npf.icelltype.get_data() ds["npf_k_updated"].values = npf.k.get_data() -# ## Show dataset `NPF K` parameter with udpates +# ## Show dataset `NPF K` parameter with updates # print dataset npf k variable print(ds["npf_k_updated"]) -# ## Update MODFLOW 6 package input file - -# rewrite mf6 npf input to read from netcdf -with open(workspace / "netcdf" / "uzf01.npf", "w") as f: - f.write("BEGIN options\n") - f.write("END options\n\n") - f.write("BEGIN griddata\n") - f.write(" icelltype NETCDF\n") - f.write(" k NETCDF\n") - f.write("END griddata\n") -with open(workspace / "netcdf" / "uzf01.npf", "r") as fh: - print(fh.read()) +# # Update the dataset with supported `IC` arrays + +# ic +ic = gwf.get_package("ic") +ds = ic.update_dataset(ds) +ds["ic_strt"].values = ic.strt.get_data() + +# # Update the dataset with supported `STO` arrays + +# storage +sto = gwf.get_package("sto") +ds = sto.update_dataset(ds) +ds["sto_iconvert"].values = sto.iconvert.get_data() +ds["sto_sy"].values = sto.sy.get_data() +ds["sto_ss"].values = sto.ss.get_data() # ## Update the dataset with supported `GHBG` arrays @@ -434,23 +269,6 @@ def create_sim(ws): istp = sum(gwf.modeltime.nstp[0:p]) ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] -# ## Update MODFLOW 6 package input file - -# rewrite mf6 ghbg input to read from netcdf -with open(workspace / "netcdf/uzf01.ghbg", "w") as f: - f.write("BEGIN options\n") - f.write(" READARRAYGRID\n") - f.write(" PRINT_INPUT\n") - f.write(" PRINT_FLOWS\n") - f.write(" OBS6 FILEIN uzf01.ghb.obs\n") - f.write("END options\n\n") - f.write("BEGIN period 1\n") - f.write(" bhead NETCDF\n") - f.write(" cond NETCDF\n") - f.write("END period 1\n") -with open(workspace / "netcdf" / "uzf01.ghbg", "r") as fh: - print(fh.read()) - # ## Display generated dataset # show the dataset @@ -471,3 +289,5 @@ def create_sim(ws): # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) + + diff --git a/examples/data/mf6/netcdf/uzf01/mfsim.nam b/examples/data/mf6/netcdf/uzf01/mfsim.nam new file mode 100644 index 0000000000..0bf43b6cfd --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/mfsim.nam @@ -0,0 +1,19 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options +END options + +BEGIN timing + TDIS6 uzf01.tdis +END timing + +BEGIN models + gwf6 uzf01.nam uzf01 +END models + +BEGIN exchanges +END exchanges + +BEGIN solutiongroup 1 + ims6 uzf01.ims uzf01 +END solutiongroup 1 + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.dis b/examples/data/mf6/netcdf/uzf01/uzf01.dis new file mode 100644 index 0000000000..4f9500adf2 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.dis @@ -0,0 +1,322 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. 
+BEGIN options + crs EPSG:26916 +END options + +BEGIN dimensions + NLAY 100 + NROW 1 + NCOL 1 +END dimensions + +BEGIN griddata + delr + CONSTANT 1.00000000 + delc + CONSTANT 1.00000000 + top + CONSTANT 100.00000000 + botm LAYERED + CONSTANT 99.00000000 + CONSTANT 98.00000000 + CONSTANT 97.00000000 + CONSTANT 96.00000000 + CONSTANT 95.00000000 + CONSTANT 94.00000000 + CONSTANT 93.00000000 + CONSTANT 92.00000000 + CONSTANT 91.00000000 + CONSTANT 90.00000000 + CONSTANT 89.00000000 + CONSTANT 88.00000000 + CONSTANT 87.00000000 + CONSTANT 86.00000000 + CONSTANT 85.00000000 + CONSTANT 84.00000000 + CONSTANT 83.00000000 + CONSTANT 82.00000000 + CONSTANT 81.00000000 + CONSTANT 80.00000000 + CONSTANT 79.00000000 + CONSTANT 78.00000000 + CONSTANT 77.00000000 + CONSTANT 76.00000000 + CONSTANT 75.00000000 + CONSTANT 74.00000000 + CONSTANT 73.00000000 + CONSTANT 72.00000000 + CONSTANT 71.00000000 + CONSTANT 70.00000000 + CONSTANT 69.00000000 + CONSTANT 68.00000000 + CONSTANT 67.00000000 + CONSTANT 66.00000000 + CONSTANT 65.00000000 + CONSTANT 64.00000000 + CONSTANT 63.00000000 + CONSTANT 62.00000000 + CONSTANT 61.00000000 + CONSTANT 60.00000000 + CONSTANT 59.00000000 + CONSTANT 58.00000000 + CONSTANT 57.00000000 + CONSTANT 56.00000000 + CONSTANT 55.00000000 + CONSTANT 54.00000000 + CONSTANT 53.00000000 + CONSTANT 52.00000000 + CONSTANT 51.00000000 + CONSTANT 50.00000000 + CONSTANT 49.00000000 + CONSTANT 48.00000000 + CONSTANT 47.00000000 + CONSTANT 46.00000000 + CONSTANT 45.00000000 + CONSTANT 44.00000000 + CONSTANT 43.00000000 + CONSTANT 42.00000000 + CONSTANT 41.00000000 + CONSTANT 40.00000000 + CONSTANT 39.00000000 + CONSTANT 38.00000000 + CONSTANT 37.00000000 + CONSTANT 36.00000000 + CONSTANT 35.00000000 + CONSTANT 34.00000000 + CONSTANT 33.00000000 + CONSTANT 32.00000000 + CONSTANT 31.00000000 + CONSTANT 30.00000000 + CONSTANT 29.00000000 + CONSTANT 28.00000000 + CONSTANT 27.00000000 + CONSTANT 26.00000000 + CONSTANT 25.00000000 + CONSTANT 24.00000000 + CONSTANT 23.00000000 + CONSTANT 22.00000000 + CONSTANT 21.00000000 + CONSTANT 20.00000000 + CONSTANT 19.00000000 + CONSTANT 18.00000000 + CONSTANT 17.00000000 + CONSTANT 16.00000000 + CONSTANT 15.00000000 + CONSTANT 14.00000000 + CONSTANT 13.00000000 + CONSTANT 12.00000000 + CONSTANT 11.00000000 + CONSTANT 10.00000000 + CONSTANT 9.00000000 + CONSTANT 8.00000000 + CONSTANT 7.00000000 + CONSTANT 6.00000000 + CONSTANT 5.00000000 + CONSTANT 4.00000000 + CONSTANT 3.00000000 + CONSTANT 2.00000000 + CONSTANT 1.00000000 + CONSTANT 0.00000000 + idomain LAYERED + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + 
INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 + INTERNAL FACTOR 1 + 1 +END griddata + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.ghb.obs b/examples/data/mf6/netcdf/uzf01/uzf01.ghb.obs new file mode 100644 index 0000000000..43f18f0bd6 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.ghb.obs @@ -0,0 +1,10 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options + DIGITS 20 + PRINT_INPUT +END options + +BEGIN continuous FILEOUT uzf01.ghb.obs.csv + 100_1_1 GHB 100 1 1 +END continuous FILEOUT uzf01.ghb.obs.csv + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.ghbg b/examples/data/mf6/netcdf/uzf01/uzf01.ghbg new file mode 100644 index 0000000000..2f5f2046a4 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.ghbg @@ -0,0 +1,213 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. 
+BEGIN options + READARRAYGRID + PRINT_INPUT + PRINT_FLOWS + OBS6 FILEIN uzf01.ghb.obs +END options + +BEGIN period 1 + bhead LAYERED + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 1.50000000 + cond LAYERED + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + 
CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 3.00000000E+30 + CONSTANT 1.00000000 +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.ic b/examples/data/mf6/netcdf/uzf01/uzf01.ic new file mode 100644 index 0000000000..c510f67b1e --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.ic @@ -0,0 +1,9 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options +END options + +BEGIN griddata + strt + CONSTANT 0.50000000 +END griddata + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.ims b/examples/data/mf6/netcdf/uzf01/uzf01.ims new file mode 100644 index 0000000000..558f5e0bdc --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.ims @@ -0,0 +1,22 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options + PRINT_OPTION summary +END options + +BEGIN nonlinear + OUTER_DVCLOSE 1.50000000E-06 + OUTER_MAXIMUM 100 + UNDER_RELAXATION dbd + UNDER_RELAXATION_THETA 0.70000000 +END nonlinear + +BEGIN linear + INNER_MAXIMUM 10 + INNER_DVCLOSE 1.50000000E-06 + inner_rclose 1.00000000E-06 + LINEAR_ACCELERATION bicgstab + RELAXATION_FACTOR 0.97000000 + SCALING_METHOD none + REORDERING_METHOD none +END linear + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.nam b/examples/data/mf6/netcdf/uzf01/uzf01.nam new file mode 100644 index 0000000000..0a21933f68 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.nam @@ -0,0 +1,17 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. 
+BEGIN options + SAVE_FLOWS + NEWTON UNDER_RELAXATION +END options + +BEGIN packages + DIS6 uzf01.dis dis + IC6 uzf01.ic ic + NPF6 uzf01.npf npf + STO6 uzf01.sto sto + GHB6 uzf01.ghbg ghbg_0 + UZF6 uzf01.uzf uzf_0 + OC6 uzf01.oc oc + OBS6 uzf01.obs head_obs +END packages + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.npf b/examples/data/mf6/netcdf/uzf01/uzf01.npf new file mode 100644 index 0000000000..ffd4c1dc75 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.npf @@ -0,0 +1,11 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options +END options + +BEGIN griddata + icelltype + CONSTANT 1 + k + CONSTANT 1.00000000 +END griddata + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.obs b/examples/data/mf6/netcdf/uzf01/uzf01.obs new file mode 100644 index 0000000000..c03d635c35 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.obs @@ -0,0 +1,10 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options + DIGITS 20 +END options + +BEGIN continuous FILEOUT uzf01.obs.csv + obs1 head 1 1 1 + obs2 head 2 1 1 +END continuous FILEOUT uzf01.obs.csv + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.oc b/examples/data/mf6/netcdf/uzf01/uzf01.oc new file mode 100644 index 0000000000..0e367a36ea --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.oc @@ -0,0 +1,14 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options + BUDGET FILEOUT uzf01.bud + HEAD FILEOUT uzf01.hds + HEAD PRINT_FORMAT COLUMNS 10 WIDTH 15 DIGITS 6 GENERAL +END options + +BEGIN period 1 + SAVE HEAD ALL + SAVE BUDGET ALL + PRINT HEAD LAST + PRINT BUDGET ALL +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.sto b/examples/data/mf6/netcdf/uzf01/uzf01.sto new file mode 100644 index 0000000000..b0ffd665a9 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.sto @@ -0,0 +1,17 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options +END options + +BEGIN griddata + iconvert + CONSTANT 1 + ss + CONSTANT 0.00000000 + sy + CONSTANT 0.10000000 +END griddata + +BEGIN period 1 + TRANSIENT +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.tdis b/examples/data/mf6/netcdf/uzf01/uzf01.tdis new file mode 100644 index 0000000000..a71368402b --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.tdis @@ -0,0 +1,13 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. +BEGIN options + TIME_UNITS days +END options + +BEGIN dimensions + NPER 1 +END dimensions + +BEGIN perioddata + 500.00000000 10 1.00000000 +END perioddata + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.uzf b/examples/data/mf6/netcdf/uzf01/uzf01.uzf new file mode 100644 index 0000000000..d718123673 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.uzf @@ -0,0 +1,123 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. 
+BEGIN options + BOUNDNAMES + PRINT_INPUT + PRINT_FLOWS + SAVE_FLOWS + BUDGET FILEOUT uzf01.uzf.bud + BUDGETCSV FILEOUT uzf01.uzf.bud.csv + OBS6 FILEIN uzf01.uzf.obs +END options + +BEGIN dimensions + NUZFCELLS 99 + NTRAILWAVES 15 + NWAVESETS 40 +END dimensions + +BEGIN packagedata + 1 1 1 1 1 2 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 001' + 2 2 1 1 0 3 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 002' + 3 3 1 1 0 4 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 003' + 4 4 1 1 0 5 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 004' + 5 5 1 1 0 6 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 005' + 6 6 1 1 0 7 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 006' + 7 7 1 1 0 8 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 007' + 8 8 1 1 0 9 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 008' + 9 9 1 1 0 10 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 009' + 10 10 1 1 0 11 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 010' + 11 11 1 1 0 12 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 011' + 12 12 1 1 0 13 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 012' + 13 13 1 1 0 14 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 013' + 14 14 1 1 0 15 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 014' + 15 15 1 1 0 16 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 015' + 16 16 1 1 0 17 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 016' + 17 17 1 1 0 18 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 017' + 18 18 1 1 0 19 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 018' + 19 19 1 1 0 20 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 019' + 20 20 1 1 0 21 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 020' + 21 21 1 1 0 22 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 021' + 22 22 1 1 0 23 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 022' + 23 23 1 1 0 24 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 023' + 24 24 1 1 0 25 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 024' + 25 25 1 1 0 26 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 025' + 26 26 1 1 0 27 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 026' + 27 27 1 1 0 28 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 027' + 28 28 1 1 0 29 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 028' + 29 29 1 1 0 30 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 029' + 30 30 1 1 0 31 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 030' + 31 31 1 1 0 32 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 031' + 32 32 1 1 0 33 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 032' + 33 33 1 1 0 34 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 033' + 34 34 1 1 0 35 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 034' + 35 35 1 1 0 36 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 035' + 36 36 1 1 0 37 0.10000000 1.00000000 
0.05000000 0.10000000 0.05000000 4.00000000 'uzf 036' + 37 37 1 1 0 38 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 037' + 38 38 1 1 0 39 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 038' + 39 39 1 1 0 40 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 039' + 40 40 1 1 0 41 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 040' + 41 41 1 1 0 42 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 041' + 42 42 1 1 0 43 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 042' + 43 43 1 1 0 44 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 043' + 44 44 1 1 0 45 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 044' + 45 45 1 1 0 46 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 045' + 46 46 1 1 0 47 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 046' + 47 47 1 1 0 48 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 047' + 48 48 1 1 0 49 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 048' + 49 49 1 1 0 50 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 049' + 50 50 1 1 0 51 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 050' + 51 51 1 1 0 52 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 051' + 52 52 1 1 0 53 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 052' + 53 53 1 1 0 54 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 053' + 54 54 1 1 0 55 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 054' + 55 55 1 1 0 56 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 055' + 56 56 1 1 0 57 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 056' + 57 57 1 1 0 58 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 057' + 58 58 1 1 0 59 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 058' + 59 59 1 1 0 60 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 059' + 60 60 1 1 0 61 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 060' + 61 61 1 1 0 62 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 061' + 62 62 1 1 0 63 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 062' + 63 63 1 1 0 64 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 063' + 64 64 1 1 0 65 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 064' + 65 65 1 1 0 66 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 065' + 66 66 1 1 0 67 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 066' + 67 67 1 1 0 68 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 067' + 68 68 1 1 0 69 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 068' + 69 69 1 1 0 70 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 069' + 70 70 1 1 0 71 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 070' + 71 71 1 1 0 72 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 071' + 72 72 1 1 0 73 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 072' + 73 73 1 1 0 74 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 073' + 74 74 1 1 0 75 0.10000000 1.00000000 0.05000000 
0.10000000 0.05000000 4.00000000 'uzf 074' + 75 75 1 1 0 76 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 075' + 76 76 1 1 0 77 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 076' + 77 77 1 1 0 78 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 077' + 78 78 1 1 0 79 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 078' + 79 79 1 1 0 80 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 079' + 80 80 1 1 0 81 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 080' + 81 81 1 1 0 82 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 081' + 82 82 1 1 0 83 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 082' + 83 83 1 1 0 84 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 083' + 84 84 1 1 0 85 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 084' + 85 85 1 1 0 86 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 085' + 86 86 1 1 0 87 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 086' + 87 87 1 1 0 88 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 087' + 88 88 1 1 0 89 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 088' + 89 89 1 1 0 90 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 089' + 90 90 1 1 0 91 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 090' + 91 91 1 1 0 92 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 091' + 92 92 1 1 0 93 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 092' + 93 93 1 1 0 94 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 093' + 94 94 1 1 0 95 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 094' + 95 95 1 1 0 96 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 095' + 96 96 1 1 0 97 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 096' + 97 97 1 1 0 98 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 097' + 98 98 1 1 0 99 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 098' + 99 99 1 1 0 0 0.10000000 1.00000000 0.05000000 0.10000000 0.05000000 4.00000000 'uzf 099' +END packagedata + +BEGIN period 1 + 1 2.01 0.0 0.0 0.0 0.0 0.0 0.0 +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf01/uzf01.uzf.obs b/examples/data/mf6/netcdf/uzf01/uzf01.uzf.obs new file mode 100644 index 0000000000..2d6aa408a3 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf01/uzf01.uzf.obs @@ -0,0 +1,13 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/13/2025 at 16:16:10. 
+BEGIN options +END options + +BEGIN continuous FILEOUT uzf01.uzf.obs.csv + 'wc 02' water-content 2 0.5 + 'wc 50' water-content 50 0.5 + 'wcbn 02' water-content 'uzf 002' 0.5 + 'wcbn 50' water-content 'UZF 050' 0.5 + 'rch 02' uzf-gwrch 'uzf 002' + 'rch 50' uzf-gwrch 'uzf 050' +END continuous FILEOUT uzf01.uzf.obs.csv + diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index e00c877b1a..412a2c4c0c 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -1160,7 +1160,7 @@ def _get_file_entry( if ( data_storage is None or data_storage.layer_storage.get_total_size() == 0 - or not data_storage.has_data() + or (not data_storage.has_data() and not data_storage.netcdf) ): return "" @@ -1206,7 +1206,7 @@ def _get_file_entry( f"{indent}{self.structure.name}{indent}{data}\n" ) elif data_storage.layered: - if not layered_aux: + if not layered_aux and not data_storage.netcdf: if not self.structure.data_item_structures[0].just_data: name = self.structure.name file_entry_array.append(f"{indent}{name}{indent}LAYERED\n") @@ -1241,7 +1241,11 @@ def _get_file_entry( layer_min = layer layer_max = shape_ml.inc_shape_idx(layer) - if layered_aux: + + if data_storage.netcdf: + file_entry_array.append(f"{indent}{self.structure.name}{indent}NETCDF\n") + + elif layered_aux: aux_var_names = ( self.data_dimensions.package_dim.get_aux_variables()[0] ) @@ -1274,15 +1278,18 @@ def _get_file_entry( file_entry_array.append( f"{indent}{self._get_aux_var_name([0])}\n" ) + elif data_storage.netcdf: + file_entry_array.append(f"{indent}{self.structure.name}{indent}NETCDF\n") else: file_entry_array.append(f"{indent}{self.structure.name}\n") data_storage_type = data_storage.layer_storage[0].data_storage_type - file_entry_array.append( - self._get_file_entry_layer( - None, data_indent, data_storage_type, ext_file_action + if not data_storage.netcdf: + file_entry_array.append( + self._get_file_entry_layer( + None, data_indent, data_storage_type, ext_file_action + ) ) - ) return "".join(file_entry_array) @@ -1395,6 +1402,9 @@ def _get_file_entry_layer( const_val, layer, self._data_type ).upper() file_entry = f"{file_entry}{indent_string}{const_str}" + elif self._get_storage_obj().netcdf: + indent = self._simulation_data.indent_string + file_entry = f"{indent}{self.structure.name}{indent_string}NETCDF\n" else: # external data ext_str = self._get_external_formatting_string( @@ -1590,6 +1600,17 @@ def plot( return axes + def _set_storage_netcdf(self, reset=False): + + if isinstance(self, MFTransientArray): + storage = self._get_storage_obj() + for key in self._data_storage.keys(): + self.get_data_prep(key) + self._data_storage[key]._set_storage_netcdf(reset=reset) + else: + storage = self._get_storage_obj() + storage._set_storage_netcdf(reset=reset) + class MFTransientArray(MFArray, MFTransient): """ diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index afcf2596ae..17b59007e9 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -203,6 +203,8 @@ class DataStorage: what internal type is the data stored in (ndarray, recarray, scalar) layered : bool is the data layered + netcdf : bool + is the data stored in netcdf pre_data_comments : string any comments before the start of the data comments : dict @@ -327,6 +329,7 @@ def __init__( self.build_type_list(resolve_data_shape=False) self.layered = layered + self.netcdf = False # initialize comments self.pre_data_comments = None @@ -1445,7 +1448,6 @@ def _resolve_data_line(self, 
data, key): for data_item_index, data_item in enumerate( struct.data_item_structures ): - print(data_item) if data_item.type == DatumType.keyword: if data_lst[0].lower() != data_item.name.lower(): data_lst_updated.append(data_item.name) @@ -1832,6 +1834,7 @@ def store_external( self._data_path, self._stress_period, ) + file_access.write_text_file( data, fp, @@ -1871,6 +1874,12 @@ def point_to_existing_external_file(self, arr_line, layer): self.set_ext_file_attributes(layer, data_file, print_format, binary) self.layer_storage[layer].factor = multiplier + def _set_storage_netcdf(self, reset=False): + if reset: + self.netcdf = False + else: + self.netcdf = True + def external_to_external( self, new_external_file, multiplier=None, layer=None, binary=None ): diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 9a9fddf34d..4dad360318 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -1583,6 +1583,7 @@ def __init__(self, dfn_file, path, common, model_file): self.description = "" self.path = path + (self.file_type,) self.model_file = model_file # file belongs to a specific model + self.read_array_grid = False self.read_as_arrays = False self.blocks, self.header = dfn_file.get_block_structure_dict( self.path, @@ -1724,6 +1725,9 @@ class MFSimulationStructure: get_data_structure(path : string) Returns a data structure if it exists, otherwise returns None. Data structure type returned is based on the tuple/list "path" + tag_read_array + Searches through all packages and tags any packages with a name that + indicates they are the READASARRAYS or READARRAYGRID version of a package. """ def __init__(self): @@ -1821,22 +1825,29 @@ def get_data_structure(self, path): else: return None - def _tag_read_as_arrays(self): - for pkg_spec in self.pkg_spec.values(): + def tag_read_array(self): + for key, package_struct in self.package_struct_objs.items(): if ( - pkg_spec.get_data_structure(("options", "readasarrays")) - or pkg_spec.get_data_structure(("options", "readarraylayer")) - or pkg_spec.get_data_structure(("options", "readarraygrid")) + package_struct.get_data_structure(('options', 'readasarrays')) ): - pkg_spec.read_as_arrays = True - for mdl_spec in self.mdl_spec.values(): - for pkg_spec in mdl_spec.pkg_spec.values(): + package_struct.read_as_arrays = True + elif ( + package_struct.get_data_structure(('options', 'readarraygrid')) + ): + package_struct.read_array_grid = True + for model_key, model_struct in self.model_struct_objs.items(): + for ( + key, + package_struct, + ) in model_struct.package_struct_objs.items(): if ( - pkg_spec.get_data_structure(("options", "readasarrays")) - or pkg_spec.get_data_structure(("options", "readarraylayer")) - or pkg_spec.get_data_structure(("options", "readarraygrid")) + package_struct.get_data_structure(('options', 'readasarrays')) + ): + package_struct.read_as_arrays = True + elif ( + package_struct.get_data_structure(('options', 'readarraygrid')) ): - pkg_spec.read_as_arrays = True + package_struct.read_array_grid = True class MFStructure: @@ -1879,5 +1890,5 @@ def _load(self): package.package_abbr ] = entry[1:] # process each package - self.sim_spec.register(Dfn(package)) - self.sim_spec._tag_read_as_arrays() + self.sim_struct.process_dfn(DfnPackage(package)) + self.sim_struct.tag_read_array() diff --git a/flopy/mf6/mfbase.py b/flopy/mf6/mfbase.py index 27bc070f34..fa66005c16 100644 --- a/flopy/mf6/mfbase.py +++ b/flopy/mf6/mfbase.py @@ -27,6 +27,12 @@ class ReadAsArraysException(Exception): package. 
""" +class ReadArrayGridException(Exception): + """ + Exception occurs when loading ReadArrayGrid package as non-ReadArrayGrid + package. + """ + # external exceptions for users class FlopyException(Exception): diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 3cd301ecef..7d69eb7b24 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -25,6 +25,7 @@ MFFileMgmt, PackageContainer, PackageContainerType, + ReadArrayGridException, ReadAsArraysException, VerbosityLevel, ) @@ -931,6 +932,17 @@ def load_base( # load name file instance.name_file.load(strict) + if hasattr(instance.name_file, "nc_filerecord"): + nc_filerecord = instance.name_file.nc_filerecord.get_data() + if nc_filerecord: + message = ( + "NetCDF input file is currently " + "unsupported for model load." + ) + raise MFDataException( + model=modelname, + message=message, + ) # order packages # FIX: Transport - Priority packages maybe should not be hard coded @@ -1299,7 +1311,11 @@ def _format_data_entry(data_entry): else: return f",{data_entry}\n" - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + def write( + self, + ext_file_action=ExtFileAction.copy_relative_paths, + netcdf=None, + ): """ Writes out model's package files. @@ -1309,7 +1325,9 @@ def write(self, ext_file_action=ExtFileAction.copy_relative_paths): Defines what to do with external files when the simulation path has changed. defaults to copy_relative_paths which copies only files with relative paths, leaving files defined by absolute paths fixed. - + netcdf : str + ASCII package files will be written as configured for NetCDF input. + 'mesh2d' and 'structured' are supported types. """ # write name file @@ -1328,8 +1346,18 @@ def write(self, ext_file_action=ExtFileAction.copy_relative_paths): self.simulation_data.max_columns_user_set = False self.simulation_data.max_columns_auto_set = True + write_netcdf = netcdf and ( + self.model_type == "gwf6" + or self.model_type == "gwt6" + or self.model_type == "gwe6" + ) + # write packages for pp in self.packagelist: + if write_netcdf: + # reset data storage to write ascii for netcdf + pp._set_netcdf_storage() + if ( self.simulation_data.verbosity_level.value >= VerbosityLevel.normal.value @@ -1337,6 +1365,10 @@ def write(self, ext_file_action=ExtFileAction.copy_relative_paths): print(f" writing package {pp._get_pname()}...") pp.write(ext_file_action=ext_file_action) + if write_netcdf: + # reset data storage + pp._set_netcdf_storage(reset=True) + def get_grid_type(self): """ Return the type of grid used by model 'model_name' in simulation @@ -2123,11 +2155,17 @@ def load_package( ) try: package.load(strict) - except ReadAsArraysException: + except (ReadAsArraysException, ReadArrayGridException) as e: # create ReadAsArrays package and load it instead - package_obj = PackageContainer.package_factory( - f"{ftype}a", model_type - ) + if isinstance(e, ReadAsArraysException): + package_obj = PackageContainer.package_factory( + f"{ftype}a", model_type + ) + # create ReadArrayGrid package and load it instead + elif isinstance(e, ReadArrayGridException): + package_obj = PackageContainer.package_factory( + f"{ftype}g", model_type + ) package = package_obj( self, filename=fname, diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 828ff8e640..a962e1d215 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -33,6 +33,7 @@ MFInvalidTransientBlockHeaderException, PackageContainer, PackageContainerType, + ReadArrayGridException, ReadAsArraysException, VerbosityLevel, ) @@ 
-1132,6 +1133,17 @@ def _find_data_by_keyword(self, line, fd, initial_comment): "package {}".format(self.path) ) raise ReadAsArraysException(error_msg) + elif ( + arr_line[0].lower() == "readarraygrid" + and self.path[-1].lower() == "options" + and self._container_package.structure.read_array_grid is False + ): + error_msg = ( + "ERROR: Attempting to read a ReadArrayGrid " + "package as a non-ReadArrayGrid " + "package {}".format(self.path) + ) + raise ReadArrayGridException(error_msg) else: nothing_found = True @@ -1686,6 +1698,34 @@ def is_valid(self): if dataset.enabled and not dataset.is_valid(): return False + def _set_netcdf_storage(self, reset=False): + """Set the dataset storage to netcdf if supported for the dataset. + + Parameters + ---------- + reset : bool + reset netcdf storage to not set. + + """ + + for key, dataset in self.datasets.items(): + if ( + isinstance(dataset, mfdataarray.MFArray) + or isinstance(dataset, mfdataarray.MFTransientArray) + ): + if dataset.structure.netcdf and dataset.has_data(): + try: + dataset._set_storage_netcdf(reset) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message="Error setting netcdf storage: " + ' data of dataset "{}" in block ' + '"{}"'.format(dataset.structure.name, self.structure.name), + ) + class MFPackage(PackageInterface): """ @@ -3641,6 +3681,21 @@ def _data_shape(shape): return dataset + def _set_netcdf_storage(self, reset=False): + """Set griddata array dataset storage to netcdf. + + Parameters + ---------- + reset : bool + reset netcdf storage to not set. + + """ + + # update blocks + for key, block in self.blocks.items(): + if key == "griddata" or key == "period": + block._set_netcdf_storage(reset) + class MFChildPackages: """ diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index ae81b91bbb..f7f8c5bc58 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -1657,7 +1657,10 @@ def set_all_data_internal(self, check_data=True): package.set_all_data_internal(check_data) def write_simulation( - self, ext_file_action=ExtFileAction.copy_relative_paths, silent=False + self, + ext_file_action=ExtFileAction.copy_relative_paths, + silent=False, + netcdf=None, ): """ Write the simulation to files. @@ -1670,6 +1673,9 @@ def write_simulation( by absolute paths fixed. silent : bool Writes out the simulation in silent mode (verbosity_level = 0) + netcdf : str + ASCII package files will be written as configured for NetCDF input. + 'mesh2d' and 'structured' are supported types. """ sim_data = self.simulation_data @@ -1737,7 +1743,10 @@ def write_simulation( >= VerbosityLevel.normal.value ): print(f" writing model {model.name}...") - model.write(ext_file_action=ext_file_action) + model.write( + ext_file_action=ext_file_action, + netcdf=netcdf, + ) self.simulation_data.mfpath.set_last_accessed_path() From 87657b8c3056a045c5dfe2ff6ad2ce4688435077 Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 14 Aug 2025 10:21:13 -0400 Subject: [PATCH 14/44] lint --- .docs/Notebooks/netcdf01_tutorial.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index c5037568e5..45566b136e 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -118,7 +118,7 @@ # dataset arrays. # # Flopy does not currently generate the NetCDF input file. 
This tutorial -# shows one way that can be accomplished. +# shows one way that can be accomplished. # create directory for netcdf sim sim.set_sim_path(workspace / "netcdf") @@ -289,5 +289,3 @@ # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) - - From f662e34d9a9fb7fddb778e5e963e29948cdb6b45 Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 14 Aug 2025 14:42:32 -0400 Subject: [PATCH 15/44] update tutorial 2 --- .docs/Notebooks/netcdf01_tutorial.py | 5 +- .docs/Notebooks/netcdf02_tutorial.py | 484 +-- examples/data/mf6/netcdf/uzf02/mfsim.nam | 19 + examples/data/mf6/netcdf/uzf02/uzf02.disv | 278 ++ examples/data/mf6/netcdf/uzf02/uzf02.ghbg | 71 + examples/data/mf6/netcdf/uzf02/uzf02.ic | 9 + examples/data/mf6/netcdf/uzf02/uzf02.ims | 22 + examples/data/mf6/netcdf/uzf02/uzf02.nam | 17 + examples/data/mf6/netcdf/uzf02/uzf02.npf | 14 + examples/data/mf6/netcdf/uzf02/uzf02.obs | 18 + examples/data/mf6/netcdf/uzf02/uzf02.oc | 14 + examples/data/mf6/netcdf/uzf02/uzf02.sto | 17 + examples/data/mf6/netcdf/uzf02/uzf02.tdis | 17 + examples/data/mf6/netcdf/uzf02/uzf02.uzf | 3036 ++++++++++++++++++ examples/data/mf6/netcdf/uzf02/uzf02.uzf.obs | 47 + examples/data/mf6/netcdf/uzf02/uzf02.uzfobs | 26 + flopy/mf6/mfmodel.py | 7 +- flopy/mf6/mfpackage.py | 6 +- 18 files changed, 3759 insertions(+), 348 deletions(-) create mode 100644 examples/data/mf6/netcdf/uzf02/mfsim.nam create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.disv create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.ghbg create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.ic create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.ims create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.nam create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.npf create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.obs create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.oc create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.sto create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.tdis create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.uzf create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.uzf.obs create mode 100644 examples/data/mf6/netcdf/uzf02/uzf02.uzfobs diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/netcdf01_tutorial.py index 45566b136e..8c2f50dba7 100644 --- a/.docs/Notebooks/netcdf01_tutorial.py +++ b/.docs/Notebooks/netcdf01_tutorial.py @@ -57,7 +57,6 @@ root = None data_path = root / "examples" / "data" / "mf6" / "netcdf" if root else Path.cwd() -print(data_path) file_names = { "mfsim.nam": None, @@ -233,14 +232,14 @@ # print dataset npf k variable print(ds["npf_k_updated"]) -# # Update the dataset with supported `IC` arrays +# ## Update the dataset with supported `IC` arrays # ic ic = gwf.get_package("ic") ds = ic.update_dataset(ds) ds["ic_strt"].values = ic.strt.get_data() -# # Update the dataset with supported `STO` arrays +# ## Update the dataset with supported `STO` arrays # storage sto = gwf.get_package("sto") diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py index 981323f7aa..12ea701705 100644 --- a/.docs/Notebooks/netcdf02_tutorial.py +++ b/.docs/Notebooks/netcdf02_tutorial.py @@ -37,7 +37,9 @@ from pprint import pformat, pprint from tempfile import TemporaryDirectory +import git import numpy as np +import pooch import xarray as xr import flopy @@ -45,282 +47,63 @@ print(sys.version) print(f"flopy version: {flopy.__version__}") -# ## Define `DNODATA` constant -# -# `DNODATA` is an important constant 
for MODFLOW 6 timeseries grid input -# data. It signifies that the cell has no data defined for the time step -# in question. These cell values are discarded and have no impact on the -# simulation. - -# DNODATA constant -DNODATA = 3.0e30 - -# ## Define ASCII input baseline simulation -# -# For the purposes of this tutorial, the specifics of this simulation -# other than it is a candidate for NetCDF input are not a focus. It -# is a NetCDF input candidate because it defines a candidate model -# type (`GWF6`) with a vertex discretization and packages that support -# NetCDF input parameters. -# -# A NetCDF dataset will be created from array data in the `IC`, and -# `GHBG` packages. Data will be copied from the package objects into -# dataset arrays. - - -# A FloPy ASCII base simulation that will be updated use netcdf inputs -def create_sim(ws): - name = "uzf02" - nlay = 5 - nrow = 10 - ncol = 10 - ncpl = nrow * ncol - delr = 1.0 - delc = 1.0 - nper = 5 - perlen = [10] * 5 - nstp = [5] * 5 - tsmult = len(perlen) * [1.0] - top = 25.0 - botm = [20.0, 15.0, 10.0, 5.0, 0.0] - strt = 20 - nouter, ninner = 100, 300 - hclose, rclose, relax = 1e-9, 1e-3, 0.97 - - # use flopy util to get disv arguments - disvkwargs = flopy.utils.gridutil.get_disv_kwargs( - nlay, nrow, ncol, delr, delc, top, botm - ) - - # Work up UZF data - iuzno = 0 - cellid = 0 - uzf_pkdat = [] - vks = 10.0 - thtr = 0.05 - thts = 0.30 - thti = 0.15 - eps = 3.5 - - for k in np.arange(nlay): - for i in np.arange(0, ncpl, 1): - if k == 0: - landflg = 1 - surfdp = 0.25 - else: - landflg = 0 - surfdp = 1e-6 - - if k == nlay - 1: - ivertcon = -1 - else: - ivertcon = iuzno + ncpl - - bndnm = "uzf" + f"{int(i + 1):03d}" - uzf_pkdat.append( - # iuzno cellid landflag ivertcn surfdp vks thtr thts thti eps [bndnm] - [ - iuzno, - (k, i), - landflg, - ivertcon, - surfdp, - vks, - thtr, - thts, - thti, - eps, - bndnm, - ] - ) - - iuzno += 1 - - extdp = 14.0 - extwc = 0.055 - pet = 0.001 - zero = 0.0 - uzf_spd = {} - for t in np.arange(0, nper, 1): - spd = [] - iuzno = 0 - for k in np.arange(nlay): - for i in np.arange(0, ncpl, 1): - if k == 0: - if t == 0: - finf = 0.15 - if t == 1: - finf = 0.15 - if t == 2: - finf = 0.15 - if t == 3: - finf = 0.15 - if t == 4: - finf = 0.15 - - spd.append([iuzno, finf, pet, extdp, extwc, zero, zero, zero]) - iuzno += 1 - - uzf_spd.update({t: spd}) - - # Work up the GHBG boundary - ghb_ids = [(ncol - 1) + i * ncol for i in range(nrow)] - abhead = np.full((nlay, ncpl), DNODATA, dtype=float) - acond = np.full((nlay, ncpl), DNODATA, dtype=float) - cond = 1e4 - for k in np.arange(3, 5, 1): - for i in ghb_ids: - abhead[k, i] = 14.0 - acond[k, i] = cond - - # build MODFLOW 6 files - sim = flopy.mf6.MFSimulation( - sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws - ) - - # time discretization - tdis_rc = [] - for i in range(nper): - tdis_rc.append((perlen[i], nstp[i], tsmult[i])) - - # create tdis package - tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc) - - # create gwf model - gwf = flopy.mf6.ModflowGwf( - sim, modelname=name, newtonoptions="NEWTON", save_flows=True - ) - - # create iterative model solution and register the gwf model with it - ims = flopy.mf6.ModflowIms( - sim, - print_option="SUMMARY", - complexity="MODERATE", - outer_dvclose=hclose, - outer_maximum=nouter, - under_relaxation="DBD", - inner_maximum=ninner, - inner_dvclose=hclose, - rcloserecord=rclose, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - 
relaxation_factor=relax, - ) - sim.register_ims_package(ims, [gwf.name]) - - # disv - disv = flopy.mf6.ModflowGwfdisv(gwf, **disvkwargs) - - # initial conditions - ic = flopy.mf6.ModflowGwfic(gwf, strt=strt) - - # node property flow - npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=True, icelltype=1, k=0.1, k33=1) - - # aquifer storage - sto = flopy.mf6.ModflowGwfsto(gwf, iconvert=1, ss=1e-5, sy=0.2, transient=True) - - # general-head boundary - ghb = flopy.mf6.ModflowGwfghbg(gwf, print_flows=True, bhead=abhead, cond=acond) - - # unsaturated-zone flow - etobs = [] - i = 4 - # Seems as though these are 1-based and not 0-based, like the rest of flopy - for j in list(np.arange(40, 50, 1)) + list(np.arange(140, 150, 1)): - etobs.append(("uzet_" + str(j + 1), "uzet", (j,))) - etobs.append(("uzf-gwet_" + str(j + 1), "uzf-gwet", (j,))) - - uzf_obs = {f"{name}.uzfobs": etobs} - - uzf = flopy.mf6.ModflowGwfuzf( - gwf, - print_flows=True, - save_flows=True, - simulate_et=True, - simulate_gwseep=True, - linear_gwet=True, - observations=uzf_obs, - boundnames=True, - ntrailwaves=15, - nwavesets=40, - nuzfcells=len(uzf_pkdat), - packagedata=uzf_pkdat, - perioddata=uzf_spd, - budget_filerecord=f"{name}.uzf.bud", - ) +sim_name = "uzf02" + +# Check if we are in the repository and define the data path. + +try: + root = Path(git.Repo(".", search_parent_directories=True).working_dir) +except: + root = None + +data_path = root / "examples" / "data" / "mf6" / "netcdf" if root else Path.cwd() + +file_names = { + "mfsim.nam": None, + "uzf02.disv": None, + "uzf02.ghbg": None, + "uzf02.ic": None, + "uzf02.ims": None, + "uzf02.nam": None, + "uzf02.npf": None, + "uzf02.obs": None, + "uzf02.oc": None, + "uzf02.sto": None, + "uzf02.tdis": None, + "uzf02.uzf": None, + "uzf02.uzf.obs": None, + "uzf02.uzfobs": None, +} - # output control - oc = flopy.mf6.ModflowGwfoc( - gwf, - budget_filerecord=f"{name}.cbc", - head_filerecord=f"{name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - filename=f"{name}.oc", +for fname, fhash in file_names.items(): + pooch.retrieve( + url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}", + fname=fname, + path=data_path / sim_name, + known_hash=fhash, ) - # Print human-readable heads - obs_lst = [] - for k in np.arange(0, 1, 1): - for i in np.arange(40, 50, 1): - obs_lst.append(["obs_" + str(i + 1), "head", (k, i)]) - - obs_dict = {f"{name}.obs.csv": obs_lst} - obs = flopy.mf6.ModflowUtlobs(gwf, pname="head_obs", digits=20, continuous=obs_dict) - - return sim - - -# ## Create helper function to update dataset -# -# This function updates an xarray dataset to add variables described -# in a FloPy provided dictionary. -# -# The dimmap variable relates NetCDF dimension names to a value. 
-
-
-# A subroutine that can update an xarray dataset with package
-# netcdf information stored in a dict
-def add_netcdf_vars(dataset, nc_info, dimmap):
-    def _data_shape(shape):
-        dims_l = []
-        for d in shape:
-            dims_l.append(dimmap[d])
-
-        return dims_l
-
-    for v in nc_info:
-        varname = nc_info[v]["varname"]
-        layered = varname.split("/")
-        if len(layered) > 1:
-            l = layered[1][6]
-            varname = f"{layered[0]}_l{l}"
-        data = np.full(
-            _data_shape(nc_info[v]["netcdf_shape"]),
-            nc_info[v]["attrs"]["_FillValue"],
-            dtype=nc_info[v]["xarray_type"],
-        )
-        var_d = {varname: (nc_info[v]["netcdf_shape"], data)}
-        dataset = dataset.assign(var_d)
-        for a in nc_info[v]["attrs"]:
-            dataset[varname].attrs[a] = nc_info[v]["attrs"][a]
-
-    return dataset
-
-
+sim_name = "uzf02"
+
+# Check if we are in the repository and define the data path.
+
+try:
+    root = Path(git.Repo(".", search_parent_directories=True).working_dir)
+except:
+    root = None
+
+data_path = root / "examples" / "data" / "mf6" / "netcdf" if root else Path.cwd()
+
+file_names = {
+    "mfsim.nam": None,
+    "uzf02.disv": None,
+    "uzf02.ghbg": None,
+    "uzf02.ic": None,
+    "uzf02.ims": None,
+    "uzf02.nam": None,
+    "uzf02.npf": None,
+    "uzf02.obs": None,
+    "uzf02.oc": None,
+    "uzf02.sto": None,
+    "uzf02.tdis": None,
+    "uzf02.uzf": None,
+    "uzf02.uzf.obs": None,
+    "uzf02.uzfobs": None,
+}
+
+for fname, fhash in file_names.items():
+    pooch.retrieve(
+        url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/mf6/netcdf/{sim_name}/{fname}",
+        fname=fname,
+        path=data_path / sim_name,
+        known_hash=fhash,
+    )
+
 # ## Create simulation workspace
 
 # create temporary directories
 temp_dir = TemporaryDirectory()
 workspace = Path(temp_dir.name)
 
-# ## Write and run baseline simulation
-
-# run the non-netcdf simulation
-sim = create_sim(ws=workspace)
+# ## Load and run baseline simulation
+#
+# For the purposes of this tutorial, the specifics of this simulation
+# other than that it is a candidate for NetCDF input are not a focus. It
+# is a NetCDF input candidate because it defines a supported model type
+# (`GWF6`) with a vertex discretization and packages that support
+# NetCDF input parameters. Vertex (`DISV`) discretizations are only
+# supported by the `UGRID layered mesh` NetCDF format and as such, the
+# `mesh` attribute will be set to `layered` when passed to FloPy functions
+# in this tutorial.
+
+# load and run the non-netcdf simulation
+sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
+sim.set_sim_path(workspace)
 sim.write_simulation()
 success, buff = sim.run_simulation(silent=True, report=True)
 assert success, pformat(buff)
@@ -330,15 +113,29 @@ def _data_shape(shape):
 # Reset the simulation path and set the `GWF` name file `nc_filerecord`
 # attribute to the name of the intended input NetCDF file. Display
 # the resultant name file changes.
+#
+# When we write the updated simulation, all packages that support NetCDF
+# input parameters will be converted. We will therefore need to create a
+# NetCDF input file containing arrays for the `DISV`, `NPF`, `IC`, `STO`,
+# and `GHBG` packages. Data will be copied from the package objects into
+# dataset arrays.
+#
+# Flopy does not currently generate the NetCDF input file. This tutorial
+# shows one way that can be accomplished.
 
 # create directory for netcdf sim
-# set model name file nc_filerecord attribute to export name
 sim.set_sim_path(workspace / "netcdf")
+# set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf02")
 gwf.name_file.nc_filerecord = "uzf02.layered.nc"
-sim.write_simulation()
+# write simulation with ASCII inputs tagged for NetCDF
+sim.write_simulation(netcdf=True)
+# show name file with NetCDF input configured
 with open(workspace / "netcdf" / "uzf02.nam", "r") as fh:
     print(fh.read())
+# show example package file with NetCDF input configured
+with open(workspace / "netcdf" / "uzf02.ic", "r") as fh:
+    print(fh.read())
 
 # ## Create dataset
 #
@@ -359,25 +156,46 @@ def _data_shape(shape):
 # First, retrieve and store the netcdf info dictionary and display
 # its contents. Then, in the following step, update the dataset with
 # the model scoped attributes defined in the dictionary.
+#
+# These 2 operations can also be accomplished by calling `update_dataset()`
+# on the model object.
+# Analogous functions for the package are shown below.

# get model netcdf info
nc_info = gwf.netcdf_info(mesh="layered")
pprint(nc_info)

-# update dataset with required attributes
+# update dataset directly with required attributes
for a in nc_info["attrs"]:
    ds.attrs[a] = nc_info["attrs"][a]

-# ## Map dataset dimension names to values
+# ## Update the dataset with supported `DIS` arrays
+#
+# Add the package's NetCDF-supported data arrays to the dataset. Internally,
+# this call uses a `netcdf_info()` package dictionary to determine candidate
+# variables and relevant information about them. Alternatively, this
+# dictionary can be directly accessed, updated, and passed to the
+# `update_dataset()` function. That workflow is demonstrated in the `NPF`
+# package update that follows.

-# define dimensional info
-dimmap = {
-    "time": sum(gwf.modeltime.nstp),
-    "z": gwf.modelgrid.nlay,
-    "nmesh_face": gwf.modelgrid.ncpl,
-}
+# update dataset with `DIS` arrays
+disv = gwf.get_package("disv")
+ds = disv.update_dataset(ds, mesh="layered")

-# ## Access package NetCDF attributes
+# ## Update array data
+#
+# We have created dataset array variables for the package but they do not yet
+# define the expected input data for MODFLOW 6. We will take advantage of the
+# existing simulation objects and update the dataset.
+#
+# Default dataset variable names are defined in the package `netcdf_info()`
+# dictionary.
+
+# update dataset from dis arrays
+ds["disv_top"].values = disv.top.get_data()
+for l in range(gwf.modelgrid.nlay):
+    ds[f"disv_botm_l{l + 1}"].values = disv.botm.get_data()[l]
+
+# ## Access `NPF` package NetCDF attributes
#
# Access package scoped NetCDF details by storing the dictionary returned
# from `netcdf_info()`. We need to set package variable attributes that are
@@ -388,59 +206,62 @@ def _data_shape(shape):
# step, the dictionary and the dataset are passed to a helper routine that
# creates the intended array variables.

-# get ic package netcdf info
-ic = gwf.get_package("ic")
-nc_info = ic.netcdf_info(mesh="layered")
+# get npf package netcdf info
+npf = gwf.get_package("npf")
+nc_info = npf.netcdf_info(mesh="layered")
pprint(nc_info)

-# create ic dataset variables
-ds = add_netcdf_vars(ds, nc_info, dimmap)
-
-# ## Update array data
+# ## Update package `netcdf_info` dictionary and dataset
#
-# We have created dataset array variables for the package but they do not yet
-# define the expected input data for MODFLOW 6. We will take advantage of the
-# existing simulation objects and update the dataset.
+# Here we update the `NPF K` layer 1 input parameter to add the
+# `standard_name` attribute to its attribute dictionary. The dictionary
+# is then passed to the `update_dataset()` function. Note that the variable
+# names defined in the dictionary are used in the subsequent block when
+# updating the array values.
+
+# update dataset with `NPF` arrays
+nc_info["k/layer1"]["attrs"]["standard_name"] = (
+    "soil_hydraulic_conductivity_at_saturation"
+)
+ds = npf.update_dataset(ds, netcdf_info=nc_info, mesh="layered")

-# update dataset from ic strt array
+# ## Update `NPF` array data
+
+# update dataset from npf arrays
for l in range(gwf.modelgrid.nlay):
-    ds[f"ic_strt_l{l + 1}"].values = ic.strt.get_data()[l].flatten()
+    ds[f"npf_icelltype_l{l + 1}"].values = npf.icelltype.get_data()[l]
+    ds[f"npf_k_l{l + 1}"].values = npf.k.get_data()[l]
+    ds[f"npf_k33_l{l + 1}"].values = npf.k33.get_data()[l]
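+
+# As a quick sanity check (a minimal sketch, assuming `numpy` is imported
+# as `np` as in the companion tutorial), a dataset layer should round-trip
+# the package array unchanged:
+#
+# assert np.allclose(ds["npf_k_l1"].values, npf.k.get_data()[0])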
-# ## Update MODFLOW 6 package input file
-#
-# MODFLOW 6 input data for the package is now in the dataset. Once the NetCDF
-# file is generated, we need to configure MODFLOW 6 so that it looks to that
-# file for the package array input. The ASCII file will no longer defined the
-# arrays- instead the array names will be followed by the NETCDF keyword.
-#
-# We will simply overwrite the entire MODFLOW 6 `IC` package input file with the
-# following code block.
-
-# rewrite mf6 ic input to read from netcdf
-with open(workspace / "netcdf" / "uzf02.ic", "w") as f:
-    f.write("BEGIN options\n")
-    f.write("END options\n\n")
-    f.write("BEGIN griddata\n")
-    f.write("  strt NETCDF\n")
-    f.write("END griddata\n")
-with open(workspace / "netcdf" / "uzf02.ic", "r") as fh:
-    print(fh.read())
+# ## Show dataset `NPF K` parameter with updates

-# ## Update MODFLOW 6 package input file
-#
-# Follow the same process as above for the `GHBG` package. The difference is
-# that this is PERIOD input and therefore stored as timeseries data in the
-# NetCDF file. As NETCDF timeseries are defined in terms of total number of
-# simulation steps, care must be taken in the translation of FloPy period
-# data to the timeseries.
+# print dataset npf k variable
+print(ds["npf_k_l1"])
+
+# ## Update the dataset with supported `IC` arrays

-# get ghbg package netcdf info
+# ic
+ic = gwf.get_package("ic")
+ds = ic.update_dataset(ds, mesh="layered")
+for l in range(gwf.modelgrid.nlay):
+    ds[f"ic_strt_l{l + 1}"].values = ic.strt.get_data()[l]
+
+# ## Update the dataset with supported `STO` arrays
+
+# storage
+sto = gwf.get_package("sto")
+ds = sto.update_dataset(ds, mesh="layered")
+for l in range(gwf.modelgrid.nlay):
+    ds[f"sto_iconvert_l{l + 1}"].values = sto.iconvert.get_data()[l]
+    ds[f"sto_sy_l{l + 1}"].values = sto.sy.get_data()[l]
+    ds[f"sto_ss_l{l + 1}"].values = sto.ss.get_data()[l]
+
+# ## Update the dataset with supported `GHBG` arrays
+
+# update dataset with `GHBG` arrays
ghbg = gwf.get_package("ghbg_0")
-nc_info = ghbg.netcdf_info(mesh="layered")
-pprint(nc_info)
+ds = ghbg.update_dataset(ds, mesh="layered")

-# create ghbg dataset variables
-ds = add_netcdf_vars(ds, nc_info, dimmap)
+# ## Update `GHBG` array data

# update bhead netcdf array from flopy perioddata
# timeseries step index is first of stress period
@@ -448,9 +269,7 @@ def _data_shape(shape):
    if ghbg.bhead.get_data()[p] is not None:
        istp = sum(gwf.modeltime.nstp[0:p])
        for l in range(gwf.modelgrid.nlay):
-            ds[f"ghbg_0_bhead_l{l + 1}"].values[istp] = ghbg.bhead.get_data()[p][
-                l
-            ].flatten()
+            ds[f"ghbg_0_bhead_l{l + 1}"].values[istp] = ghbg.bhead.get_data()[p][l]

# update cond netcdf array from flopy perioddata
# timeseries step index is first of stress period
@@ -458,22 +277,7 @@ def _data_shape(shape):
    if ghbg.cond.get_data()[p] is not None:
        istp = sum(gwf.modeltime.nstp[0:p])
        for l in range(gwf.modelgrid.nlay):
-            ds[f"ghbg_0_cond_l{l + 1}"].values[istp] = ghbg.cond.get_data()[p][
-                l
-            ].flatten()
-
-# rewrite mf6 ghbg input to read from netcdf
-with open(workspace / "netcdf/uzf02.ghbg", "w") as f:
-    f.write("BEGIN options\n")
-    f.write("  READARRAYGRID\n")
-    f.write("  PRINT_FLOWS\n")
-    f.write("END options\n\n")
-    f.write("BEGIN period 1\n")
-    f.write("  bhead NETCDF\n")
-    f.write("  cond NETCDF\n")
-    f.write("END period 1\n")
-with open(workspace / "netcdf" / "uzf02.ghbg", "r") as fh:
-    print(fh.read())
+            ds[f"ghbg_0_cond_l{l + 1}"].values[istp] = ghbg.cond.get_data()[p][l]

# ## Display generated dataset

diff --git a/examples/data/mf6/netcdf/uzf02/mfsim.nam b/examples/data/mf6/netcdf/uzf02/mfsim.nam
new file mode 100644
index 0000000000..8d597acf84
---
/dev/null +++ b/examples/data/mf6/netcdf/uzf02/mfsim.nam @@ -0,0 +1,19 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options +END options + +BEGIN timing + TDIS6 uzf02.tdis +END timing + +BEGIN models + gwf6 uzf02.nam uzf02 +END models + +BEGIN exchanges +END exchanges + +BEGIN solutiongroup 1 + ims6 uzf02.ims uzf02 +END solutiongroup 1 + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.disv b/examples/data/mf6/netcdf/uzf02/uzf02.disv new file mode 100644 index 0000000000..a19abbd1ce --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.disv @@ -0,0 +1,278 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options +END options + +BEGIN dimensions + NLAY 5 + NCPL 100 + NVERT 121 +END dimensions + +BEGIN griddata + top + INTERNAL FACTOR 1.0 + 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 + 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 + 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 + 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 + 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 25.00000000 + botm LAYERED + INTERNAL FACTOR 1.0 + 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 + 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 + 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 + 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 + 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 20.00000000 + INTERNAL FACTOR 1.0 + 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 + 
15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 + 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 + 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 + 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 15.00000000 + INTERNAL FACTOR 1.0 + 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 + 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 + 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 + 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 + 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 10.00000000 + INTERNAL FACTOR 1.0 + 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 + 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 + 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 + 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 + 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 5.00000000 + INTERNAL FACTOR 1.0 + 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 
0.00000000 0.00000000 + 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 + 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 + 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 + 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 +END griddata + +BEGIN vertices + 1 0.00000000 10.00000000 + 2 1.00000000 10.00000000 + 3 2.00000000 10.00000000 + 4 3.00000000 10.00000000 + 5 4.00000000 10.00000000 + 6 5.00000000 10.00000000 + 7 6.00000000 10.00000000 + 8 7.00000000 10.00000000 + 9 8.00000000 10.00000000 + 10 9.00000000 10.00000000 + 11 10.00000000 10.00000000 + 12 0.00000000 9.00000000 + 13 1.00000000 9.00000000 + 14 2.00000000 9.00000000 + 15 3.00000000 9.00000000 + 16 4.00000000 9.00000000 + 17 5.00000000 9.00000000 + 18 6.00000000 9.00000000 + 19 7.00000000 9.00000000 + 20 8.00000000 9.00000000 + 21 9.00000000 9.00000000 + 22 10.00000000 9.00000000 + 23 0.00000000 8.00000000 + 24 1.00000000 8.00000000 + 25 2.00000000 8.00000000 + 26 3.00000000 8.00000000 + 27 4.00000000 8.00000000 + 28 5.00000000 8.00000000 + 29 6.00000000 8.00000000 + 30 7.00000000 8.00000000 + 31 8.00000000 8.00000000 + 32 9.00000000 8.00000000 + 33 10.00000000 8.00000000 + 34 0.00000000 7.00000000 + 35 1.00000000 7.00000000 + 36 2.00000000 7.00000000 + 37 3.00000000 7.00000000 + 38 4.00000000 7.00000000 + 39 5.00000000 7.00000000 + 40 6.00000000 7.00000000 + 41 7.00000000 7.00000000 + 42 8.00000000 7.00000000 + 43 9.00000000 7.00000000 + 44 10.00000000 7.00000000 + 45 0.00000000 6.00000000 + 46 1.00000000 6.00000000 + 47 2.00000000 6.00000000 + 48 3.00000000 6.00000000 + 49 4.00000000 6.00000000 + 50 5.00000000 6.00000000 + 51 6.00000000 6.00000000 + 52 7.00000000 6.00000000 + 53 8.00000000 6.00000000 + 54 9.00000000 6.00000000 + 55 10.00000000 6.00000000 + 56 0.00000000 5.00000000 + 57 1.00000000 5.00000000 + 58 2.00000000 5.00000000 + 59 3.00000000 5.00000000 + 60 4.00000000 5.00000000 + 61 5.00000000 5.00000000 + 62 6.00000000 5.00000000 + 63 7.00000000 5.00000000 + 64 8.00000000 5.00000000 + 65 9.00000000 5.00000000 + 66 10.00000000 5.00000000 + 67 0.00000000 4.00000000 + 68 1.00000000 4.00000000 + 69 2.00000000 4.00000000 + 70 3.00000000 4.00000000 + 71 4.00000000 4.00000000 + 72 5.00000000 4.00000000 + 73 6.00000000 4.00000000 + 74 7.00000000 4.00000000 + 75 8.00000000 4.00000000 + 76 9.00000000 4.00000000 + 77 10.00000000 4.00000000 + 78 0.00000000 3.00000000 + 79 1.00000000 3.00000000 + 80 2.00000000 3.00000000 + 81 3.00000000 3.00000000 + 82 4.00000000 3.00000000 + 83 5.00000000 3.00000000 + 84 6.00000000 3.00000000 + 85 7.00000000 3.00000000 + 86 8.00000000 3.00000000 + 87 9.00000000 3.00000000 + 88 10.00000000 3.00000000 + 89 0.00000000 2.00000000 + 90 1.00000000 2.00000000 + 91 2.00000000 2.00000000 + 92 3.00000000 2.00000000 + 93 4.00000000 2.00000000 + 94 5.00000000 2.00000000 + 95 6.00000000 2.00000000 + 96 7.00000000 2.00000000 + 97 
8.00000000 2.00000000 + 98 9.00000000 2.00000000 + 99 10.00000000 2.00000000 + 100 0.00000000 1.00000000 + 101 1.00000000 1.00000000 + 102 2.00000000 1.00000000 + 103 3.00000000 1.00000000 + 104 4.00000000 1.00000000 + 105 5.00000000 1.00000000 + 106 6.00000000 1.00000000 + 107 7.00000000 1.00000000 + 108 8.00000000 1.00000000 + 109 9.00000000 1.00000000 + 110 10.00000000 1.00000000 + 111 0.00000000 0.00000000 + 112 1.00000000 0.00000000 + 113 2.00000000 0.00000000 + 114 3.00000000 0.00000000 + 115 4.00000000 0.00000000 + 116 5.00000000 0.00000000 + 117 6.00000000 0.00000000 + 118 7.00000000 0.00000000 + 119 8.00000000 0.00000000 + 120 9.00000000 0.00000000 + 121 10.00000000 0.00000000 +END vertices + +BEGIN cell2d + 1 0.50000000 9.50000000 4 1 2 13 12 + 2 1.50000000 9.50000000 4 2 3 14 13 + 3 2.50000000 9.50000000 4 3 4 15 14 + 4 3.50000000 9.50000000 4 4 5 16 15 + 5 4.50000000 9.50000000 4 5 6 17 16 + 6 5.50000000 9.50000000 4 6 7 18 17 + 7 6.50000000 9.50000000 4 7 8 19 18 + 8 7.50000000 9.50000000 4 8 9 20 19 + 9 8.50000000 9.50000000 4 9 10 21 20 + 10 9.50000000 9.50000000 4 10 11 22 21 + 11 0.50000000 8.50000000 4 12 13 24 23 + 12 1.50000000 8.50000000 4 13 14 25 24 + 13 2.50000000 8.50000000 4 14 15 26 25 + 14 3.50000000 8.50000000 4 15 16 27 26 + 15 4.50000000 8.50000000 4 16 17 28 27 + 16 5.50000000 8.50000000 4 17 18 29 28 + 17 6.50000000 8.50000000 4 18 19 30 29 + 18 7.50000000 8.50000000 4 19 20 31 30 + 19 8.50000000 8.50000000 4 20 21 32 31 + 20 9.50000000 8.50000000 4 21 22 33 32 + 21 0.50000000 7.50000000 4 23 24 35 34 + 22 1.50000000 7.50000000 4 24 25 36 35 + 23 2.50000000 7.50000000 4 25 26 37 36 + 24 3.50000000 7.50000000 4 26 27 38 37 + 25 4.50000000 7.50000000 4 27 28 39 38 + 26 5.50000000 7.50000000 4 28 29 40 39 + 27 6.50000000 7.50000000 4 29 30 41 40 + 28 7.50000000 7.50000000 4 30 31 42 41 + 29 8.50000000 7.50000000 4 31 32 43 42 + 30 9.50000000 7.50000000 4 32 33 44 43 + 31 0.50000000 6.50000000 4 34 35 46 45 + 32 1.50000000 6.50000000 4 35 36 47 46 + 33 2.50000000 6.50000000 4 36 37 48 47 + 34 3.50000000 6.50000000 4 37 38 49 48 + 35 4.50000000 6.50000000 4 38 39 50 49 + 36 5.50000000 6.50000000 4 39 40 51 50 + 37 6.50000000 6.50000000 4 40 41 52 51 + 38 7.50000000 6.50000000 4 41 42 53 52 + 39 8.50000000 6.50000000 4 42 43 54 53 + 40 9.50000000 6.50000000 4 43 44 55 54 + 41 0.50000000 5.50000000 4 45 46 57 56 + 42 1.50000000 5.50000000 4 46 47 58 57 + 43 2.50000000 5.50000000 4 47 48 59 58 + 44 3.50000000 5.50000000 4 48 49 60 59 + 45 4.50000000 5.50000000 4 49 50 61 60 + 46 5.50000000 5.50000000 4 50 51 62 61 + 47 6.50000000 5.50000000 4 51 52 63 62 + 48 7.50000000 5.50000000 4 52 53 64 63 + 49 8.50000000 5.50000000 4 53 54 65 64 + 50 9.50000000 5.50000000 4 54 55 66 65 + 51 0.50000000 4.50000000 4 56 57 68 67 + 52 1.50000000 4.50000000 4 57 58 69 68 + 53 2.50000000 4.50000000 4 58 59 70 69 + 54 3.50000000 4.50000000 4 59 60 71 70 + 55 4.50000000 4.50000000 4 60 61 72 71 + 56 5.50000000 4.50000000 4 61 62 73 72 + 57 6.50000000 4.50000000 4 62 63 74 73 + 58 7.50000000 4.50000000 4 63 64 75 74 + 59 8.50000000 4.50000000 4 64 65 76 75 + 60 9.50000000 4.50000000 4 65 66 77 76 + 61 0.50000000 3.50000000 4 67 68 79 78 + 62 1.50000000 3.50000000 4 68 69 80 79 + 63 2.50000000 3.50000000 4 69 70 81 80 + 64 3.50000000 3.50000000 4 70 71 82 81 + 65 4.50000000 3.50000000 4 71 72 83 82 + 66 5.50000000 3.50000000 4 72 73 84 83 + 67 6.50000000 3.50000000 4 73 74 85 84 + 68 7.50000000 3.50000000 4 74 75 86 85 + 69 8.50000000 3.50000000 4 75 76 87 86 + 70 9.50000000 3.50000000 
4 76 77 88 87 + 71 0.50000000 2.50000000 4 78 79 90 89 + 72 1.50000000 2.50000000 4 79 80 91 90 + 73 2.50000000 2.50000000 4 80 81 92 91 + 74 3.50000000 2.50000000 4 81 82 93 92 + 75 4.50000000 2.50000000 4 82 83 94 93 + 76 5.50000000 2.50000000 4 83 84 95 94 + 77 6.50000000 2.50000000 4 84 85 96 95 + 78 7.50000000 2.50000000 4 85 86 97 96 + 79 8.50000000 2.50000000 4 86 87 98 97 + 80 9.50000000 2.50000000 4 87 88 99 98 + 81 0.50000000 1.50000000 4 89 90 101 100 + 82 1.50000000 1.50000000 4 90 91 102 101 + 83 2.50000000 1.50000000 4 91 92 103 102 + 84 3.50000000 1.50000000 4 92 93 104 103 + 85 4.50000000 1.50000000 4 93 94 105 104 + 86 5.50000000 1.50000000 4 94 95 106 105 + 87 6.50000000 1.50000000 4 95 96 107 106 + 88 7.50000000 1.50000000 4 96 97 108 107 + 89 8.50000000 1.50000000 4 97 98 109 108 + 90 9.50000000 1.50000000 4 98 99 110 109 + 91 0.50000000 0.50000000 4 100 101 112 111 + 92 1.50000000 0.50000000 4 101 102 113 112 + 93 2.50000000 0.50000000 4 102 103 114 113 + 94 3.50000000 0.50000000 4 103 104 115 114 + 95 4.50000000 0.50000000 4 104 105 116 115 + 96 5.50000000 0.50000000 4 105 106 117 116 + 97 6.50000000 0.50000000 4 106 107 118 117 + 98 7.50000000 0.50000000 4 107 108 119 118 + 99 8.50000000 0.50000000 4 108 109 120 119 + 100 9.50000000 0.50000000 4 109 110 121 120 +END cell2d + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.ghbg b/examples/data/mf6/netcdf/uzf02/uzf02.ghbg new file mode 100644 index 0000000000..233f3de72f --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.ghbg @@ -0,0 +1,71 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + READARRAYGRID + PRINT_FLOWS +END options + +BEGIN period 1 + bhead LAYERED + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 
3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 
3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 14.00000000 + cond LAYERED + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 
3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 
3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + INTERNAL FACTOR 1.0 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 + 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 3.00000000E+30 10000.00000000 +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.ic b/examples/data/mf6/netcdf/uzf02/uzf02.ic new file mode 100644 index 0000000000..c34acac299 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.ic @@ -0,0 +1,9 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options +END options + +BEGIN griddata + strt + CONSTANT 20.00000000 +END griddata + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.ims b/examples/data/mf6/netcdf/uzf02/uzf02.ims new file mode 100644 index 0000000000..9c9d2c4061 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.ims @@ -0,0 +1,22 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. 
+BEGIN options + PRINT_OPTION summary + COMPLEXITY moderate +END options + +BEGIN nonlinear + OUTER_DVCLOSE 1.00000000E-09 + OUTER_MAXIMUM 100 + UNDER_RELAXATION dbd +END nonlinear + +BEGIN linear + INNER_MAXIMUM 300 + INNER_DVCLOSE 1.00000000E-09 + inner_rclose 0.00100000 + LINEAR_ACCELERATION bicgstab + RELAXATION_FACTOR 0.97000000 + SCALING_METHOD none + REORDERING_METHOD none +END linear + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.nam b/examples/data/mf6/netcdf/uzf02/uzf02.nam new file mode 100644 index 0000000000..c527e13238 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.nam @@ -0,0 +1,17 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + SAVE_FLOWS + NEWTON +END options + +BEGIN packages + DISV6 uzf02.disv disv + IC6 uzf02.ic ic + NPF6 uzf02.npf npf + STO6 uzf02.sto sto + GHB6 uzf02.ghbg ghbg_0 + UZF6 uzf02.uzf uzf_0 + OC6 uzf02.oc oc + OBS6 uzf02.obs head_obs +END packages + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.npf b/examples/data/mf6/netcdf/uzf02/uzf02.npf new file mode 100644 index 0000000000..5e16be0894 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.npf @@ -0,0 +1,14 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + SAVE_FLOWS +END options + +BEGIN griddata + icelltype + CONSTANT 1 + k + CONSTANT 0.10000000 + k33 + CONSTANT 1.00000000 +END griddata + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.obs b/examples/data/mf6/netcdf/uzf02/uzf02.obs new file mode 100644 index 0000000000..f5e794d7c0 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.obs @@ -0,0 +1,18 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + DIGITS 20 +END options + +BEGIN continuous FILEOUT uzf02.obs.csv + obs_41 head 1 41 + obs_42 head 1 42 + obs_43 head 1 43 + obs_44 head 1 44 + obs_45 head 1 45 + obs_46 head 1 46 + obs_47 head 1 47 + obs_48 head 1 48 + obs_49 head 1 49 + obs_50 head 1 50 +END continuous FILEOUT uzf02.obs.csv + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.oc b/examples/data/mf6/netcdf/uzf02/uzf02.oc new file mode 100644 index 0000000000..ccb4eec150 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.oc @@ -0,0 +1,14 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + BUDGET FILEOUT uzf02.cbc + HEAD FILEOUT uzf02.hds + HEAD PRINT_FORMAT COLUMNS 10 WIDTH 15 DIGITS 6 GENERAL +END options + +BEGIN period 1 + SAVE HEAD ALL + SAVE BUDGET ALL + PRINT HEAD ALL + PRINT BUDGET ALL +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.sto b/examples/data/mf6/netcdf/uzf02/uzf02.sto new file mode 100644 index 0000000000..b11bf51027 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.sto @@ -0,0 +1,17 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options +END options + +BEGIN griddata + iconvert + CONSTANT 1 + ss + CONSTANT 1.00000000E-05 + sy + CONSTANT 0.20000000 +END griddata + +BEGIN period 1 + TRANSIENT +END period 1 + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.tdis b/examples/data/mf6/netcdf/uzf02/uzf02.tdis new file mode 100644 index 0000000000..5057360dbe --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.tdis @@ -0,0 +1,17 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. 
+BEGIN options + TIME_UNITS days +END options + +BEGIN dimensions + NPER 5 +END dimensions + +BEGIN perioddata + 10.00000000 5 1.00000000 + 10.00000000 5 1.00000000 + 10.00000000 5 1.00000000 + 10.00000000 5 1.00000000 + 10.00000000 5 1.00000000 +END perioddata + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.uzf b/examples/data/mf6/netcdf/uzf02/uzf02.uzf new file mode 100644 index 0000000000..b954d9b543 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.uzf @@ -0,0 +1,3036 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. +BEGIN options + BOUNDNAMES + PRINT_FLOWS + SAVE_FLOWS + BUDGET FILEOUT uzf02.uzf.bud + OBS6 FILEIN uzf02.uzf.obs + SIMULATE_ET + LINEAR_GWET + SIMULATE_GWSEEP +END options + +BEGIN dimensions + NUZFCELLS 500 + NTRAILWAVES 15 + NWAVESETS 40 +END dimensions + +BEGIN packagedata + 1 1 1 1 101 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf001 + 2 1 2 1 102 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf002 + 3 1 3 1 103 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf003 + 4 1 4 1 104 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf004 + 5 1 5 1 105 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf005 + 6 1 6 1 106 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf006 + 7 1 7 1 107 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf007 + 8 1 8 1 108 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf008 + 9 1 9 1 109 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf009 + 10 1 10 1 110 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf010 + 11 1 11 1 111 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf011 + 12 1 12 1 112 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf012 + 13 1 13 1 113 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf013 + 14 1 14 1 114 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf014 + 15 1 15 1 115 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf015 + 16 1 16 1 116 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf016 + 17 1 17 1 117 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf017 + 18 1 18 1 118 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf018 + 19 1 19 1 119 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf019 + 20 1 20 1 120 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf020 + 21 1 21 1 121 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf021 + 22 1 22 1 122 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf022 + 23 1 23 1 123 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf023 + 24 1 24 1 124 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf024 + 25 1 25 1 125 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf025 + 26 1 26 1 126 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf026 + 27 1 27 1 127 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf027 + 28 1 28 1 128 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf028 + 29 1 29 1 129 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf029 + 30 1 30 1 130 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf030 + 31 1 31 1 131 0.25000000 
10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf031 + 32 1 32 1 132 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf032 + 33 1 33 1 133 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf033 + 34 1 34 1 134 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf034 + 35 1 35 1 135 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf035 + 36 1 36 1 136 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf036 + 37 1 37 1 137 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf037 + 38 1 38 1 138 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf038 + 39 1 39 1 139 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf039 + 40 1 40 1 140 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf040 + 41 1 41 1 141 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf041 + 42 1 42 1 142 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf042 + 43 1 43 1 143 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf043 + 44 1 44 1 144 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf044 + 45 1 45 1 145 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf045 + 46 1 46 1 146 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf046 + 47 1 47 1 147 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf047 + 48 1 48 1 148 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf048 + 49 1 49 1 149 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf049 + 50 1 50 1 150 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf050 + 51 1 51 1 151 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf051 + 52 1 52 1 152 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf052 + 53 1 53 1 153 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf053 + 54 1 54 1 154 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf054 + 55 1 55 1 155 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf055 + 56 1 56 1 156 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf056 + 57 1 57 1 157 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf057 + 58 1 58 1 158 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf058 + 59 1 59 1 159 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf059 + 60 1 60 1 160 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf060 + 61 1 61 1 161 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf061 + 62 1 62 1 162 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf062 + 63 1 63 1 163 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf063 + 64 1 64 1 164 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf064 + 65 1 65 1 165 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf065 + 66 1 66 1 166 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf066 + 67 1 67 1 167 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf067 + 68 1 68 1 168 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf068 + 69 1 69 1 169 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf069 + 70 1 70 1 170 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 
3.50000000 uzf070 + 71 1 71 1 171 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf071 + 72 1 72 1 172 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf072 + 73 1 73 1 173 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf073 + 74 1 74 1 174 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf074 + 75 1 75 1 175 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf075 + 76 1 76 1 176 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf076 + 77 1 77 1 177 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf077 + 78 1 78 1 178 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf078 + 79 1 79 1 179 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf079 + 80 1 80 1 180 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf080 + 81 1 81 1 181 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf081 + 82 1 82 1 182 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf082 + 83 1 83 1 183 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf083 + 84 1 84 1 184 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf084 + 85 1 85 1 185 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf085 + 86 1 86 1 186 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf086 + 87 1 87 1 187 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf087 + 88 1 88 1 188 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf088 + 89 1 89 1 189 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf089 + 90 1 90 1 190 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf090 + 91 1 91 1 191 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf091 + 92 1 92 1 192 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf092 + 93 1 93 1 193 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf093 + 94 1 94 1 194 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf094 + 95 1 95 1 195 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf095 + 96 1 96 1 196 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf096 + 97 1 97 1 197 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf097 + 98 1 98 1 198 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf098 + 99 1 99 1 199 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf099 + 100 1 100 1 200 0.25000000 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf100 + 101 2 1 0 201 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf001 + 102 2 2 0 202 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf002 + 103 2 3 0 203 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf003 + 104 2 4 0 204 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf004 + 105 2 5 0 205 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf005 + 106 2 6 0 206 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf006 + 107 2 7 0 207 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf007 + 108 2 8 0 208 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 3.50000000 uzf008 + 109 2 9 0 209 1.00000000E-06 10.00000000 0.05000000 0.30000000 0.15000000 
3.50000000 uzf009
+  [... packagedata records 110 through 500 condensed: each record repeats the
+   pattern `iuzno layer icell landflag ivertcon 1.00000000E-06 10.00000000
+   0.05000000 0.30000000 0.15000000 3.50000000 uzfNNN`, with landflag 0 and
+   boundnames uzf001-uzf100 cycling within each of layers 2 through 5, and
+   ivertcon pointing to the cell one layer down (iuzno + 100), written as 0
+   in the bottom layer (layer 5) ...]
+END packagedata
+
+BEGIN period 1
+  [... 500 identical stress records of the form
+   `iuzno 0.15 0.001 14.0 0.055 0.0 0.0 0.0` ...]
+END period 1
+
+BEGIN period 2
+  [... records 1 through 444 identical to period 1 ...]
+ 445 0.15 0.001 14.0
0.055 0.0 0.0 0.0 + 446 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 447 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 448 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 449 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 450 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 451 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 452 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 453 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 454 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 455 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 456 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 457 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 458 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 459 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 460 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 461 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 462 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 463 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 464 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 465 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 466 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 467 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 468 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 469 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 470 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 471 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 472 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 473 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 474 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 475 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 476 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 477 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 478 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 479 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 480 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 481 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 482 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 483 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 484 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 485 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 486 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 487 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 488 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 489 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 490 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 491 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 492 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 493 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 494 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 495 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 496 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 497 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 498 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 499 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 500 0.15 0.001 14.0 0.055 0.0 0.0 0.0 +END period 2 + +BEGIN period 3 + 1 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 2 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 3 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 4 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 5 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 6 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 7 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 8 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 9 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 10 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 11 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 12 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 13 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 14 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 15 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 16 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 17 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 18 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 19 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 20 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 21 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 22 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 23 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 24 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 25 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 26 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 27 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 28 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 29 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 30 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 31 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 32 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 33 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 34 0.15 0.001 14.0 0.055 
0.0 0.0 0.0 + 35 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 36 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 37 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 38 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 39 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 40 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 41 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 42 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 43 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 44 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 45 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 46 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 47 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 48 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 49 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 50 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 51 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 52 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 53 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 54 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 55 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 56 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 57 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 58 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 59 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 60 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 61 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 62 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 63 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 64 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 65 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 66 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 67 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 68 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 69 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 70 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 71 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 72 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 73 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 74 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 75 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 76 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 77 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 78 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 79 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 80 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 81 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 82 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 83 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 84 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 85 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 86 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 87 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 88 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 89 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 90 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 91 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 92 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 93 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 94 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 95 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 96 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 97 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 98 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 99 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 100 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 101 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 102 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 103 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 104 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 105 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 106 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 107 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 108 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 109 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 110 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 111 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 112 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 113 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 114 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 115 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 116 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 117 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 118 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 119 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 120 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 121 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 122 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 123 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 124 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 125 
0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 126 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 127 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 128 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 129 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 130 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 131 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 132 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 133 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 134 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 135 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 136 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 137 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 138 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 139 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 140 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 141 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 142 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 143 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 144 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 145 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 146 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 147 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 148 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 149 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 150 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 151 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 152 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 153 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 154 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 155 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 156 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 157 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 158 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 159 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 160 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 161 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 162 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 163 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 164 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 165 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 166 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 167 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 168 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 169 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 170 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 171 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 172 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 173 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 174 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 175 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 176 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 177 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 178 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 179 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 180 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 181 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 182 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 183 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 184 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 185 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 186 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 187 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 188 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 189 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 190 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 191 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 192 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 193 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 194 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 195 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 196 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 197 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 198 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 199 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 200 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 201 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 202 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 203 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 204 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 205 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 206 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 207 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 208 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 209 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 210 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 211 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 212 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 213 0.15 0.001 14.0 0.055 0.0 0.0 0.0 
+ 214 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 215 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 216 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 217 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 218 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 219 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 220 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 221 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 222 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 223 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 224 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 225 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 226 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 227 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 228 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 229 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 230 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 231 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 232 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 233 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 234 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 235 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 236 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 237 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 238 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 239 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 240 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 241 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 242 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 243 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 244 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 245 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 246 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 247 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 248 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 249 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 250 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 251 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 252 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 253 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 254 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 255 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 256 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 257 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 258 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 259 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 260 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 261 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 262 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 263 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 264 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 265 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 266 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 267 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 268 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 269 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 270 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 271 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 272 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 273 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 274 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 275 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 276 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 277 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 278 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 279 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 280 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 281 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 282 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 283 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 284 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 285 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 286 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 287 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 288 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 289 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 290 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 291 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 292 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 293 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 294 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 295 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 296 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 297 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 298 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 299 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 300 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 301 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 302 0.15 0.001 14.0 0.055 0.0 
0.0 0.0 + 303 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 304 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 305 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 306 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 307 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 308 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 309 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 310 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 311 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 312 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 313 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 314 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 315 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 316 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 317 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 318 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 319 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 320 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 321 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 322 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 323 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 324 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 325 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 326 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 327 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 328 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 329 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 330 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 331 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 332 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 333 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 334 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 335 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 336 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 337 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 338 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 339 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 340 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 341 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 342 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 343 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 344 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 345 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 346 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 347 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 348 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 349 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 350 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 351 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 352 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 353 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 354 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 355 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 356 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 357 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 358 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 359 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 360 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 361 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 362 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 363 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 364 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 365 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 366 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 367 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 368 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 369 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 370 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 371 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 372 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 373 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 374 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 375 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 376 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 377 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 378 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 379 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 380 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 381 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 382 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 383 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 384 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 385 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 386 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 387 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 388 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 389 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 390 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 391 0.15 0.001 14.0 
0.055 0.0 0.0 0.0 + 392 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 393 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 394 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 395 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 396 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 397 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 398 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 399 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 400 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 401 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 402 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 403 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 404 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 405 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 406 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 407 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 408 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 409 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 410 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 411 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 412 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 413 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 414 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 415 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 416 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 417 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 418 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 419 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 420 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 421 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 422 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 423 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 424 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 425 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 426 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 427 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 428 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 429 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 430 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 431 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 432 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 433 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 434 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 435 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 436 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 437 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 438 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 439 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 440 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 441 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 442 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 443 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 444 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 445 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 446 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 447 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 448 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 449 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 450 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 451 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 452 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 453 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 454 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 455 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 456 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 457 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 458 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 459 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 460 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 461 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 462 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 463 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 464 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 465 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 466 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 467 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 468 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 469 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 470 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 471 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 472 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 473 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 474 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 475 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 476 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 477 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 478 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 479 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 480 0.15 0.001 
14.0 0.055 0.0 0.0 0.0 + 481 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 482 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 483 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 484 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 485 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 486 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 487 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 488 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 489 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 490 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 491 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 492 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 493 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 494 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 495 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 496 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 497 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 498 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 499 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 500 0.15 0.001 14.0 0.055 0.0 0.0 0.0 +END period 3 + +BEGIN period 4 + 1 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 2 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 3 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 4 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 5 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 6 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 7 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 8 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 9 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 10 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 11 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 12 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 13 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 14 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 15 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 16 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 17 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 18 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 19 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 20 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 21 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 22 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 23 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 24 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 25 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 26 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 27 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 28 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 29 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 30 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 31 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 32 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 33 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 34 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 35 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 36 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 37 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 38 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 39 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 40 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 41 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 42 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 43 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 44 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 45 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 46 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 47 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 48 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 49 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 50 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 51 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 52 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 53 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 54 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 55 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 56 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 57 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 58 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 59 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 60 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 61 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 62 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 63 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 64 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 65 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 66 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 67 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 68 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 69 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 70 0.15 0.001 
14.0 0.055 0.0 0.0 0.0 + 71 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 72 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 73 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 74 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 75 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 76 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 77 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 78 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 79 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 80 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 81 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 82 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 83 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 84 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 85 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 86 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 87 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 88 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 89 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 90 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 91 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 92 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 93 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 94 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 95 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 96 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 97 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 98 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 99 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 100 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 101 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 102 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 103 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 104 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 105 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 106 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 107 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 108 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 109 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 110 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 111 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 112 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 113 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 114 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 115 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 116 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 117 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 118 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 119 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 120 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 121 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 122 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 123 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 124 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 125 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 126 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 127 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 128 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 129 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 130 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 131 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 132 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 133 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 134 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 135 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 136 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 137 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 138 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 139 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 140 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 141 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 142 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 143 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 144 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 145 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 146 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 147 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 148 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 149 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 150 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 151 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 152 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 153 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 154 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 155 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 156 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 157 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 158 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 159 0.15 0.001 14.0 0.055 0.0 0.0 0.0 
+ 160 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 161 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 162 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 163 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 164 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 165 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 166 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 167 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 168 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 169 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 170 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 171 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 172 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 173 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 174 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 175 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 176 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 177 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 178 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 179 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 180 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 181 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 182 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 183 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 184 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 185 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 186 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 187 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 188 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 189 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 190 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 191 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 192 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 193 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 194 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 195 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 196 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 197 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 198 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 199 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 200 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 201 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 202 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 203 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 204 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 205 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 206 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 207 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 208 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 209 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 210 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 211 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 212 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 213 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 214 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 215 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 216 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 217 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 218 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 219 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 220 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 221 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 222 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 223 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 224 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 225 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 226 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 227 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 228 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 229 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 230 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 231 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 232 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 233 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 234 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 235 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 236 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 237 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 238 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 239 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 240 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 241 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 242 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 243 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 244 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 245 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 246 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 247 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 248 0.15 0.001 14.0 0.055 0.0 
0.0 0.0 + 249 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 250 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 251 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 252 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 253 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 254 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 255 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 256 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 257 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 258 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 259 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 260 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 261 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 262 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 263 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 264 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 265 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 266 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 267 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 268 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 269 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 270 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 271 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 272 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 273 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 274 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 275 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 276 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 277 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 278 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 279 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 280 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 281 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 282 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 283 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 284 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 285 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 286 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 287 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 288 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 289 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 290 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 291 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 292 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 293 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 294 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 295 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 296 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 297 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 298 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 299 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 300 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 301 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 302 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 303 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 304 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 305 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 306 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 307 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 308 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 309 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 310 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 311 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 312 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 313 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 314 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 315 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 316 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 317 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 318 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 319 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 320 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 321 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 322 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 323 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 324 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 325 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 326 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 327 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 328 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 329 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 330 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 331 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 332 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 333 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 334 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 335 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 336 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 337 0.15 0.001 14.0 
0.055 0.0 0.0 0.0 + 338 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 339 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 340 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 341 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 342 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 343 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 344 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 345 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 346 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 347 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 348 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 349 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 350 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 351 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 352 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 353 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 354 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 355 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 356 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 357 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 358 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 359 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 360 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 361 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 362 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 363 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 364 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 365 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 366 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 367 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 368 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 369 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 370 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 371 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 372 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 373 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 374 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 375 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 376 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 377 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 378 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 379 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 380 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 381 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 382 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 383 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 384 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 385 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 386 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 387 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 388 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 389 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 390 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 391 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 392 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 393 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 394 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 395 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 396 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 397 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 398 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 399 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 400 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 401 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 402 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 403 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 404 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 405 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 406 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 407 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 408 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 409 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 410 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 411 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 412 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 413 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 414 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 415 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 416 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 417 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 418 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 419 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 420 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 421 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 422 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 423 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 424 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 425 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 426 0.15 0.001 
14.0 0.055 0.0 0.0 0.0 + 427 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 428 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 429 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 430 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 431 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 432 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 433 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 434 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 435 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 436 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 437 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 438 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 439 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 440 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 441 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 442 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 443 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 444 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 445 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 446 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 447 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 448 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 449 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 450 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 451 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 452 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 453 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 454 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 455 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 456 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 457 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 458 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 459 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 460 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 461 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 462 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 463 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 464 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 465 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 466 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 467 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 468 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 469 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 470 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 471 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 472 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 473 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 474 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 475 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 476 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 477 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 478 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 479 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 480 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 481 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 482 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 483 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 484 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 485 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 486 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 487 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 488 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 489 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 490 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 491 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 492 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 493 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 494 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 495 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 496 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 497 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 498 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 499 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 500 0.15 0.001 14.0 0.055 0.0 0.0 0.0 +END period 4 + +BEGIN period 5 + 1 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 2 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 3 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 4 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 5 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 6 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 7 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 8 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 9 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 10 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 11 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 12 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 13 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 14 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 
15 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 16 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 17 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 18 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 19 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 20 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 21 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 22 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 23 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 24 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 25 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 26 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 27 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 28 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 29 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 30 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 31 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 32 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 33 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 34 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 35 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 36 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 37 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 38 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 39 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 40 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 41 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 42 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 43 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 44 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 45 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 46 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 47 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 48 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 49 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 50 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 51 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 52 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 53 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 54 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 55 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 56 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 57 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 58 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 59 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 60 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 61 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 62 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 63 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 64 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 65 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 66 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 67 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 68 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 69 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 70 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 71 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 72 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 73 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 74 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 75 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 76 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 77 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 78 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 79 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 80 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 81 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 82 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 83 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 84 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 85 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 86 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 87 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 88 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 89 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 90 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 91 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 92 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 93 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 94 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 95 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 96 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 97 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 98 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 99 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 100 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 101 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 102 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 103 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 104 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 105 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 
106 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 107 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 108 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 109 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 110 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 111 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 112 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 113 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 114 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 115 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 116 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 117 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 118 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 119 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 120 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 121 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 122 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 123 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 124 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 125 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 126 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 127 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 128 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 129 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 130 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 131 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 132 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 133 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 134 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 135 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 136 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 137 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 138 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 139 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 140 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 141 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 142 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 143 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 144 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 145 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 146 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 147 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 148 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 149 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 150 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 151 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 152 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 153 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 154 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 155 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 156 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 157 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 158 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 159 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 160 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 161 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 162 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 163 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 164 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 165 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 166 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 167 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 168 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 169 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 170 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 171 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 172 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 173 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 174 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 175 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 176 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 177 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 178 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 179 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 180 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 181 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 182 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 183 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 184 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 185 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 186 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 187 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 188 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 189 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 190 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 191 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 192 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 193 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 194 0.15 0.001 14.0 0.055 0.0 0.0 
0.0 + 195 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 196 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 197 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 198 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 199 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 200 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 201 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 202 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 203 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 204 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 205 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 206 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 207 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 208 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 209 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 210 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 211 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 212 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 213 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 214 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 215 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 216 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 217 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 218 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 219 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 220 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 221 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 222 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 223 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 224 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 225 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 226 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 227 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 228 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 229 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 230 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 231 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 232 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 233 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 234 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 235 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 236 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 237 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 238 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 239 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 240 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 241 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 242 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 243 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 244 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 245 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 246 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 247 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 248 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 249 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 250 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 251 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 252 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 253 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 254 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 255 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 256 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 257 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 258 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 259 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 260 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 261 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 262 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 263 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 264 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 265 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 266 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 267 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 268 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 269 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 270 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 271 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 272 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 273 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 274 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 275 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 276 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 277 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 278 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 279 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 280 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 281 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 282 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 283 0.15 0.001 14.0 0.055 
0.0 0.0 0.0 + 284 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 285 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 286 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 287 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 288 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 289 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 290 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 291 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 292 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 293 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 294 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 295 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 296 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 297 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 298 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 299 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 300 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 301 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 302 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 303 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 304 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 305 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 306 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 307 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 308 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 309 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 310 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 311 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 312 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 313 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 314 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 315 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 316 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 317 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 318 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 319 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 320 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 321 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 322 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 323 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 324 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 325 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 326 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 327 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 328 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 329 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 330 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 331 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 332 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 333 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 334 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 335 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 336 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 337 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 338 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 339 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 340 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 341 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 342 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 343 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 344 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 345 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 346 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 347 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 348 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 349 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 350 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 351 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 352 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 353 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 354 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 355 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 356 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 357 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 358 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 359 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 360 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 361 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 362 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 363 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 364 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 365 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 366 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 367 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 368 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 369 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 370 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 371 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 372 0.15 0.001 14.0 
0.055 0.0 0.0 0.0 + 373 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 374 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 375 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 376 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 377 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 378 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 379 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 380 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 381 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 382 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 383 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 384 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 385 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 386 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 387 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 388 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 389 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 390 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 391 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 392 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 393 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 394 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 395 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 396 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 397 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 398 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 399 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 400 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 401 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 402 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 403 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 404 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 405 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 406 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 407 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 408 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 409 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 410 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 411 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 412 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 413 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 414 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 415 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 416 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 417 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 418 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 419 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 420 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 421 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 422 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 423 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 424 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 425 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 426 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 427 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 428 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 429 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 430 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 431 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 432 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 433 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 434 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 435 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 436 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 437 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 438 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 439 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 440 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 441 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 442 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 443 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 444 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 445 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 446 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 447 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 448 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 449 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 450 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 451 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 452 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 453 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 454 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 455 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 456 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 457 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 458 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 459 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 460 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 461 0.15 0.001 
14.0 0.055 0.0 0.0 0.0 + 462 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 463 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 464 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 465 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 466 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 467 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 468 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 469 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 470 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 471 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 472 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 473 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 474 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 475 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 476 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 477 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 478 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 479 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 480 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 481 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 482 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 483 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 484 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 485 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 486 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 487 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 488 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 489 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 490 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 491 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 492 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 493 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 494 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 495 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 496 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 497 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 498 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 499 0.15 0.001 14.0 0.055 0.0 0.0 0.0 + 500 0.15 0.001 14.0 0.055 0.0 0.0 0.0 +END period 5 + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.uzf.obs b/examples/data/mf6/netcdf/uzf02/uzf02.uzf.obs new file mode 100644 index 0000000000..3127e8769e --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.uzf.obs @@ -0,0 +1,47 @@ +# File generated by Flopy version 3.10.0.dev3 on 08/14/2025 at 10:40:32. 
+BEGIN options +END options + +BEGIN continuous FILEOUT uzf02.uzfobs + uzet_41 uzet 41 + uzf-gwet_41 uzf-gwet 41 + uzet_42 uzet 42 + uzf-gwet_42 uzf-gwet 42 + uzet_43 uzet 43 + uzf-gwet_43 uzf-gwet 43 + uzet_44 uzet 44 + uzf-gwet_44 uzf-gwet 44 + uzet_45 uzet 45 + uzf-gwet_45 uzf-gwet 45 + uzet_46 uzet 46 + uzf-gwet_46 uzf-gwet 46 + uzet_47 uzet 47 + uzf-gwet_47 uzf-gwet 47 + uzet_48 uzet 48 + uzf-gwet_48 uzf-gwet 48 + uzet_49 uzet 49 + uzf-gwet_49 uzf-gwet 49 + uzet_50 uzet 50 + uzf-gwet_50 uzf-gwet 50 + uzet_141 uzet 141 + uzf-gwet_141 uzf-gwet 141 + uzet_142 uzet 142 + uzf-gwet_142 uzf-gwet 142 + uzet_143 uzet 143 + uzf-gwet_143 uzf-gwet 143 + uzet_144 uzet 144 + uzf-gwet_144 uzf-gwet 144 + uzet_145 uzet 145 + uzf-gwet_145 uzf-gwet 145 + uzet_146 uzet 146 + uzf-gwet_146 uzf-gwet 146 + uzet_147 uzet 147 + uzf-gwet_147 uzf-gwet 147 + uzet_148 uzet 148 + uzf-gwet_148 uzf-gwet 148 + uzet_149 uzet 149 + uzf-gwet_149 uzf-gwet 149 + uzet_150 uzet 150 + uzf-gwet_150 uzf-gwet 150 +END continuous FILEOUT uzf02.uzfobs + diff --git a/examples/data/mf6/netcdf/uzf02/uzf02.uzfobs b/examples/data/mf6/netcdf/uzf02/uzf02.uzfobs new file mode 100644 index 0000000000..e2e4ca9441 --- /dev/null +++ b/examples/data/mf6/netcdf/uzf02/uzf02.uzfobs @@ -0,0 +1,26 @@ +time,UZET_41,UZF-GWET_41,UZET_42,UZF-GWET_42,UZET_43,UZF-GWET_43,UZET_44,UZF-GWET_44,UZET_45,UZF-GWET_45,UZET_46,UZF-GWET_46,UZET_47,UZF-GWET_47,UZET_48,UZF-GWET_48,UZET_49,UZF-GWET_49,UZET_50,UZF-GWET_50,UZET_141,UZF-GWET_141,UZET_142,UZF-GWET_142,UZET_143,UZF-GWET_143,UZET_144,UZF-GWET_144,UZET_145,UZF-GWET_145,UZET_146,UZF-GWET_146,UZET_147,UZF-GWET_147,UZET_148,UZF-GWET_148,UZET_149,UZF-GWET_149,UZET_150,UZF-GWET_150 +2.000000000000,-0.34821428571427115E-3,-0.47501789210262950E-3,-0.34821428571427115E-3,-0.47341442005392319E-3,-0.34821428571427115E-3,-0.47027372704428372E-3,-0.34821428571427115E-3,-0.46572728044588123E-3,-0.34821428571427115E-3,-0.46000374346177647E-3,-0.34821428571427115E-3,-0.45324902118874257E-3,-0.34821428571427115E-3,-0.44513984825862469E-3,-0.34821428571427115E-3,-0.43394100440266798E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.12007061161391469E-3,0.0000000000000000,-0.12055932383962553E-3,0.0000000000000000,-0.12148013622134517E-3,0.0000000000000000,-0.12273832553540923E-3,0.0000000000000000,-0.12422817189647475E-3,0.0000000000000000,-0.12586859877776897E-3,0.0000000000000000,-0.12764974422383124E-3,0.0000000000000000,-0.13051198013343561E-3,-0.46659051380654182E-4,-0.35109402053951208E-3,-0.56533591941687611E-4,-0.33634676199215086E-3 +4.000000000000,-0.27713573672519254E-3,-0.52253274311983935E-3,-0.28058925999990825E-3,-0.51755181282748088E-3,-0.28733320746801572E-3,-0.50789395717781892E-3,-0.29713004327339143E-3,-0.49402617606886180E-3,-0.31005899605235365E-3,-0.47601858892828194E-3,-0.32670492491193581E-3,-0.45332625813784356E-3,-0.34760733886546324E-3,-0.42561618430220346E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13411108519281663E-3,0.0000000000000000,-0.13432448896523664E-3,0.0000000000000000,-0.13465863570420603E-3,0.0000000000000000,-0.13499384163069274E-3,0.0000000000000000,-0.13509969203675252E-3,0.0000000000000000,-0.13457818066261356E-3,0.0000000000000000,-0.13307117897596839E-3,-0.63115355426593411E-4,-0.32668331991698681E-3,-0.80830079402419153E-4,-0.30133464956921797E-3,-0.93055493656554744E-4,-0.28440242186135076E-3 
+6.000000000000,-0.28677693792794612E-3,-0.50868713627143670E-3,-0.29058022253414251E-3,-0.50327642065970063E-3,-0.29804387906473551E-3,-0.49274239571847904E-3,-0.30942344117007581E-3,-0.47689598360537485E-3,-0.32529574885095669E-3,-0.45522582651857609E-3,-0.34654498449659954E-3,-0.42700345728654003E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13469962366776793E-3,0.0000000000000000,-0.13489794375472832E-3,0.0000000000000000,-0.13521680714888019E-3,0.0000000000000000,-0.13542274148888430E-3,0.0000000000000000,-0.13514498072090045E-3,0.0000000000000000,-0.13397749787980663E-3,-0.60054241322264612E-4,-0.33116115798129001E-3,-0.79576028877828553E-4,-0.30309744731355458E-3,-0.98120635750362228E-4,-0.27752155603274857E-3,-0.11105591020321715E-3,-0.26030655140544504E-3 +8.000000000000,-0.29535709511199770E-3,-0.49652162340898783E-3,-0.29937630825538353E-3,-0.49087355743385338E-3,-0.30766794278302978E-3,-0.47932367745027169E-3,-0.32072461636098448E-3,-0.46141504681791127E-3,-0.34034995473325447E-3,-0.43513818222041558E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13531283948083468E-3,0.0000000000000000,-0.13548529403928161E-3,0.0000000000000000,-0.13566697313279900E-3,0.0000000000000000,-0.13562389017047566E-3,0.0000000000000000,-0.13520964979021530E-3,-0.49852905394227875E-4,-0.34629138209619072E-3,-0.68319192510504778E-4,-0.31913706554810171E-3,-0.88068041043498635E-4,-0.29125467079443011E-3,-0.10683213407136627E-3,-0.26587134152241158E-3,-0.11995785676022219E-3,-0.24875757869481934E-3 +10.00000000000,-0.30155290664507506E-3,-0.48782834221594920E-3,-0.30594404927972163E-3,-0.48171366273022176E-3,-0.31476900226326543E-3,-0.46954152025929004E-3,-0.32825049903648096E-3,-0.45124739204473532E-3,-0.34708410345579699E-3,-0.42629916796011272E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13577707714385853E-3,0.0000000000000000,-0.13587632460551770E-3,0.0000000000000000,-0.13594226018766878E-3,0.0000000000000000,-0.13573741243190723E-3,0.0000000000000000,-0.13495913750981374E-3,-0.53421891114499576E-4,-0.34096168469756663E-3,-0.72295455357342109E-4,-0.31342696055411704E-3,-0.92254151021015085E-4,-0.28549823790316377E-3,-0.11114323572096463E-3,-0.26019207835468376E-3,-0.12436292200608545E-3,-0.24313258614977039E-3 
+12.00000000000,-0.30771518635080986E-3,-0.47925826320928400E-3,-0.31239003473124138E-3,-0.47280746433689836E-3,-0.32189724933207042E-3,-0.45982334046340860E-3,-0.33810390369551357E-3,-0.43810644230312659E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13599538701833500E-3,0.0000000000000000,-0.13604858805654714E-3,0.0000000000000000,-0.13602893192599119E-3,0.0000000000000000,-0.13595403417751334E-3,-0.42761141578050166E-4,-0.35699776778992339E-3,-0.58348482651550404E-4,-0.33366883941598027E-3,-0.76417765016573158E-4,-0.30755834585348744E-3,-0.95855541383180198E-4,-0.28058889796404158E-3,-0.11440879225804723E-3,-0.25592813248103362E-3,-0.12746208906692924E-3,-0.23921082560527545E-3 +14.00000000000,-0.31258573714226801E-3,-0.47253836878022944E-3,-0.31725360080014919E-3,-0.46614264562036133E-3,-0.32665887338334532E-3,-0.45338827279338152E-3,-0.34220732773609930E-3,-0.43269119968408569E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13618130536491475E-3,0.0000000000000000,-0.13620282024997974E-3,0.0000000000000000,-0.13612219530567134E-3,0.0000000000000000,-0.13592583294302180E-3,-0.44078775436377782E-4,-0.35499687243169644E-3,-0.60135546789089056E-4,-0.33104185156345052E-3,-0.78332844719433692E-4,-0.30484973349271768E-3,-0.97754041013350945E-4,-0.27801692291284984E-3,-0.11624786920096108E-3,-0.25354119171892039E-3,-0.12926174108211785E-3,-0.23694702883544656E-3 +16.00000000000,-0.31632687224844580E-3,-0.46740894560958618E-3,-0.32092829103330667E-3,-0.46113838591893335E-3,-0.33013260464237471E-3,-0.44872232736319539E-3,-0.34486241552161845E-3,-0.42920525459616164E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13629856655846104E-3,0.0000000000000000,-0.13628757704653503E-3,0.0000000000000000,-0.13613608538718059E-3,0.0000000000000000,-0.13574988511860835E-3,-0.45966023920929189E-4,-0.35214026746952724E-3,-0.61791353160776941E-4,-0.32861655920972052E-3,-0.79826437005312001E-4,-0.30274506723042651E-3,-0.99116685788436243E-4,-0.27617771019356429E-3,-0.11750895427001251E-3,-0.25191042573059091E-3,-0.13046660094950813E-3,-0.23543697989744411E-3 
+18.00000000000,-0.31921124385120292E-3,-0.46347333049862136E-3,-0.32375240023224672E-3,-0.45731081619165012E-3,-0.33279162739188739E-3,-0.44516701247835960E-3,-0.34682436618899137E-3,-0.42663840860440499E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13636946612468459E-3,0.0000000000000000,-0.13633183978974860E-3,0.0000000000000000,-0.13612290764633588E-3,0.0000000000000000,-0.13557551807184777E-3,-0.47523360924978109E-4,-0.34979125828395467E-3,-0.63146785956209872E-4,-0.32663749205319372E-3,-0.81017809407016306E-4,-0.30107117592469612E-3,-0.10017176658203009E-3,-0.27475754180950594E-3,-0.11845885166293646E-3,-0.25068529279803013E-3,-0.13135687531211360E-3,-0.23432406221234059E-3 +20.00000000000,-0.32252698597562413E-3,-0.45896968473126500E-3,-0.32741169235675494E-3,-0.45237503157840176E-3,-0.33877549720975897E-3,-0.43721784309018669E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13635135454788573E-3,0.0000000000000000,-0.13629654605946793E-3,0.0000000000000000,-0.13632725465649413E-3,-0.38238355576255406E-4,-0.36390638000206280E-3,-0.50244979534791456E-4,-0.34570397235415048E-3,-0.65257143584115940E-4,-0.32356738086538879E-3,-0.82704251226375924E-4,-0.29870915275837244E-3,-0.10154648745382133E-3,-0.27291225374431001E-3,-0.11960926930160154E-3,-0.24920524540089303E-3,-0.13237985537209451E-3,-0.23304825412567810E-3 +22.00000000000,-0.32463439046986897E-3,-0.45611870653599717E-3,-0.32942926101295344E-3,-0.44966511598563117E-3,-0.34050753884759533E-3,-0.43493030631685462E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13641689736794215E-3,0.0000000000000000,-0.13636794942024813E-3,0.0000000000000000,-0.13643643235389594E-3,-0.38238355576258876E-4,-0.36428463589372503E-3,-0.50470290921159067E-4,-0.34536662180418231E-3,-0.65745074748428667E-4,-0.32285949227931540E-3,-0.83285650709169490E-4,-0.29789687007827600E-3,-0.10214305533350065E-3,-0.27211328486126071E-3,-0.12019114414130172E-3,-0.24845819214959681E-3,-0.13294765496013605E-3,-0.23234150949839102E-3 
+24.00000000000,-0.32599783189693476E-3,-0.45427892260762638E-3,-0.33069101329527562E-3,-0.44797451968368743E-3,-0.34143402376612952E-3,-0.43370914505286946E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13645115243014283E-3,0.0000000000000000,-0.13638845853384889E-3,0.0000000000000000,-0.13640655970007437E-3,-0.38345947038984085E-4,-0.36374130387218111E-3,-0.50914686042402568E-4,-0.34470170175418210E-3,-0.66163551911441187E-4,-0.32225294923806064E-3,-0.83685225249874318E-4,-0.29733921936617267E-3,-0.10252277461851589E-3,-0.27160530480663041E-3,-0.12055222363104023E-3,-0.24799513441423299E-3,-0.13329779537399333E-3,-0.23190618008152991E-3 +26.00000000000,-0.32694903368818418E-3,-0.45299760325325856E-3,-0.33157831064761778E-3,-0.44678755479668811E-3,-0.34209164963502903E-3,-0.43284339747994634E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13646616158495572E-3,0.0000000000000000,-0.13639065729897693E-3,0.0000000000000000,-0.13636316543091161E-3,-0.38822880027836182E-4,-0.36300997973679849E-3,-0.51327534002269803E-4,-0.34408452669463217E-3,-0.66527687610090647E-4,-0.32172560623169866E-3,-0.84009500922133107E-4,-0.29688701672882917E-3,-0.10281348355857534E-3,-0.27121669984514680E-3,-0.12081668499594467E-3,-0.24765623608934400E-3,-0.13354736094975395E-3,-0.23159612493951418E-3 +28.00000000000,-0.32764455130662729E-3,-0.45206184938767241E-3,-0.33223391962863835E-3,-0.44591153809451860E-3,-0.34258530251221608E-3,-0.43219408447293321E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13647325569106746E-3,0.0000000000000000,-0.13638841996685305E-3,0.0000000000000000,-0.13632632572502349E-3,-0.39200482056959973E-4,-0.36243146373423046E-3,-0.51658876142307575E-4,-0.34358957470862525E-3,-0.66819888231706148E-4,-0.32130273417797295E-3,-0.84267218274883260E-4,-0.29652785983281594E-3,-0.10304109283106044E-3,-0.27091262431907495E-3,-0.12102033801209000E-3,-0.24739540787791230E-3,-0.13373709696318237E-3,-0.23136052869756306E-3 
+30.00000000000,-0.32816441639335614E-3,-0.45136305140007298E-3,-0.33272697567410248E-3,-0.44525328899301078E-3,-0.34295875224252259E-3,-0.43170320125468617E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13647705071172082E-3,0.0000000000000000,-0.13638548981385087E-3,0.0000000000000000,-0.13629729551875222E-3,-0.39490840505071456E-4,-0.36198690932246272E-3,-0.51915238284815091E-4,-0.34320685743834307E-3,-0.67046763654828034E-4,-0.32097458127963794E-3,-0.84467462505927182E-4,-0.29624893862051707E-3,-0.10321763847066789E-3,-0.27067687693063626E-3,-0.12117777095700666E-3,-0.24719386315146072E-3,-0.13388329702507251E-3,-0.23117906663670167E-3 +32.00000000000,-0.32855668799763871E-3,-0.45083612123270500E-3,-0.33310015552040495E-3,-0.44475540256689434E-3,-0.34324156524856431E-3,-0.43133164161714122E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13647930029581031E-3,0.0000000000000000,-0.13638280322320686E-3,0.0000000000000000,-0.13627481507519893E-3,-0.39712377102901764E-4,-0.36164789885651763E-3,-0.52111365205415039E-4,-0.34291420013173870E-3,-0.67220723193782417E-4,-0.32072307296451786E-3,-0.84621248235230984E-4,-0.29603481322428872E-3,-0.10335334562144860E-3,-0.27049572757689103E-3,-0.12129881923643449E-3,-0.24703894957786696E-3,-0.13399569856648030E-3,-0.23103959955915443E-3 +34.00000000000,-0.32885387736333871E-3,-0.45043711793022812E-3,-0.33338329516879872E-3,-0.44437783116000934E-3,-0.34345584907771731E-3,-0.43105022211026192E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648073067705767E-3,0.0000000000000000,-0.13638055965211227E-3,0.0000000000000000,-0.13625751831063495E-3,-0.39880971734657100E-4,-0.36139000472447023E-3,-0.52260806403997551E-4,-0.34269128570946553E-3,-0.67353434364855747E-4,-0.32053126341559271E-3,-0.84738699676498164E-4,-0.29587132718458494E-3,-0.10345709049371099E-3,-0.27035728134052535E-3,-0.12139143111745965E-3,-0.24692045832276567E-3,-0.13408173950134328E-3,-0.23093286674114391E-3 
+36.00000000000,-0.32907942045992677E-3,-0.45013442405039141E-3,-0.33359833018925533E-3,-0.44409118552653703E-3,-0.34361829616669226E-3,-0.43083694112710179E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648168323829101E-3,0.0000000000000000,-0.13637875574948761E-3,0.0000000000000000,-0.13624425671400980E-3,-0.40009154847808759E-4,-0.36119398524133740E-3,-0.52374492298723485E-4,-0.34252175169108626E-3,-0.67454455797893997E-4,-0.32038529177154959E-3,-0.84828162348171698E-4,-0.29574682839558536E-3,-0.10353616333252935E-3,-0.27025178180360482E-3,-0.12146206106099233E-3,-0.24683010930589362E-3,-0.13414738719255692E-3,-0.23085144679197540E-3 +38.00000000000,-0.32925071680101059E-3,-0.44990460091195300E-3,-0.33376170668125704E-3,-0.44387346348426845E-3,-0.34374150713661900E-3,-0.43067520945531219E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648233799361790E-3,0.0000000000000000,-0.13637733320343178E-3,0.0000000000000000,-0.13623411279618667E-3,-0.40106575955815849E-4,-0.36104504118502463E-3,-0.52460918194614792E-4,-0.34239289566321109E-3,-0.67531277662034872E-4,-0.32027430842938105E-3,-0.84896217347543712E-4,-0.29565213757690818E-3,-0.10359633632683884E-3,-0.27017151142717907E-3,-0.12151582845126851E-3,-0.24676134093635008E-3,-0.13419737554559719E-3,-0.23078945730107842E-3 +40.00000000000,-0.32938085422501429E-3,-0.44973003867996545E-3,-0.33388585295154871E-3,-0.44370805689807904E-3,-0.34383499683168783E-3,-0.43055251138285355E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648279844833062E-3,0.0000000000000000,-0.13637622414082511E-3,0.0000000000000000,-0.13622636672165747E-3,-0.40180605369873057E-4,-0.36093187942432112E-3,-0.52526600110770760E-4,-0.34229498304138314E-3,-0.67589669292017485E-4,-0.32018996307081149E-3,-0.84947954075612064E-4,-0.29558016129202860E-3,-0.10364208947867548E-3,-0.27011048446089431E-3,-0.12155671880118013E-3,-0.24670904821050645E-3,-0.13423539755648417E-3,-0.23074231215005302E-3 
+42.00000000000,-0.32947973486294191E-3,-0.44959742595947053E-3,-0.33398019311403626E-3,-0.44358238316439841E-3,-0.34390595755104392E-3,-0.43045939253702091E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648312789668539E-3,0.0000000000000000,-0.13637536582149267E-3,0.0000000000000000,-0.13622045903710736E-3,-0.40236855697595164E-4,-0.36084590611405699E-3,-0.52576510026422496E-4,-0.34222059073357606E-3,-0.67634042448874720E-4,-0.32012587406849780E-3,-0.84987273124129126E-4,-0.29552546601160359E-3,-0.10367686419036903E-3,-0.27006410517337552E-3,-0.12158780038830364E-3,-0.24666930287749916E-3,-0.13426430094379271E-3,-0.23070647654625895E-3 +44.00000000000,-0.32955486919822308E-3,-0.44949667341579792E-3,-0.33405188305937750E-3,-0.44348689445676255E-3,-0.34395983147098019E-3,-0.43038870272358492E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648336677198628E-3,0.0000000000000000,-0.13637470486555506E-3,0.0000000000000000,-0.13621595761337583E-3,-0.40279595370687482E-4,-0.36078058905371574E-3,-0.52614432696169999E-4,-0.34216407090525557E-3,-0.67667759042183695E-4,-0.32007718058505504E-3,-0.85017150385405249E-4,-0.29548390802560981E-3,-0.10370328935868645E-3,-0.27002886421417457E-3,-0.12161142009126102E-3,-0.24663910135197238E-3,-0.13428626605462013E-3,-0.23067924501617859E-3 +46.00000000000,-0.32961196028655215E-3,-0.44942012379084473E-3,-0.33410635982467074E-3,-0.44341434002594105E-3,-0.34400074054879992E-3,-0.43033502840051480E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648354178969439E-3,0.0000000000000000,-0.13637419767781621E-3,0.0000000000000000,-0.13621253009276545E-3,-0.40312068963370418E-4,-0.36073096491530095E-3,-0.52643246306896541E-4,-0.34212113014022263E-3,-0.67693377061278648E-4,-0.32004018538964586E-3,-0.85039851510940179E-4,-0.29545233356716486E-3,-0.10372336779557179E-3,-0.27000208877377347E-3,-0.12162936710422012E-3,-0.24661615440226415E-3,-0.13430295602358255E-3,-0.23065855440123392E-3 
+48.00000000000,-0.32965534084536396E-3,-0.44936196205714056E-3,-0.33414775544243769E-3,-0.44335921158233558E-3,-0.34403180917738641E-3,-0.43029426737107614E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648367107459950E-3,0.0000000000000000,-0.13637380946989467E-3,0.0000000000000000,-0.13620992164091656E-3,-0.40336742220162258E-4,-0.36069326293163831E-3,-0.52665138593081806E-4,-0.34208850589006090E-3,-0.67712841348981090E-4,-0.32001207819545060E-3,-0.85057099590737928E-4,-0.29542834467821482E-3,-0.10373862320271765E-3,-0.26998174587347082E-3,-0.12164300308645237E-3,-0.24659872017290384E-3,-0.13431563692023474E-3,-0.23064283441917685E-3 +50.00000000000,-0.32968830286878026E-3,-0.44931777131093089E-3,-0.33417921007891649E-3,-0.44331732429112882E-3,-0.34405540675311319E-3,-0.43026330940981131E-3,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,-0.34821428571427115E-3,0.0000000000000000,0.0000000000000000,-0.13648376718493842E-3,0.0000000000000000,-0.13637351287806577E-3,0.0000000000000000,-0.13620793732207191E-3,-0.40355488608374845E-4,-0.36066461875160487E-3,-0.52681771884832695E-4,-0.34206371966432280E-3,-0.67727629816195789E-4,-0.31999072384643909E-3,-0.85070204181136866E-4,-0.29541011922692669E-3,-0.10375021376489435E-3,-0.26996629047685422E-3,-0.12165336321130760E-3,-0.24658547465901209E-3,-0.13432527137167971E-3,-0.23063089131884415E-3 diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 7d69eb7b24..f2b783cacb 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1355,7 +1355,7 @@ def write( # write packages for pp in self.packagelist: if write_netcdf: - # reset data storage to write ascii for netcdf + # set data storage to write ascii for netcdf pp._set_netcdf_storage() if ( @@ -2029,9 +2029,12 @@ def register_package( pkg_type = package.package_type.upper() if ( package.package_type != "obs" and + (self.structure.pkg_spec[ + package.package_type + ].read_as_arrays or self.structure.pkg_spec[ package.package_type - ].read_as_arrays + ].read_array_grid) ): pkg_type = pkg_type[0:-1] # Model Assumption - assuming all name files have a package diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index a962e1d215..bcebfb752c 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3655,9 +3655,9 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): } elif mesh.upper() == "LAYERED": dimmap = { - "time": sum(gwf.modeltime.nstp), - "z": gwf.modelgrid.nlay, - "nmesh_face": gwf.modelgrid.ncpl, + "time": sum(modeltime.nstp), + "z": modelgrid.nlay, + "nmesh_face": modelgrid.ncpl, } def _data_shape(shape): From 72a7fbc86aee941a1af5089881f5b1f1b1e40340 Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 15 Aug 2025 14:36:21 -0400 Subject: [PATCH 16/44] add tests --- ...1_tutorial.py => mf6_netcdf01_tutorial.py} | 107 +++-- .docs/Notebooks/netcdf02_tutorial.py | 299 ------------ autotest/regression/test_model_netcdf.py | 453 ++++++++++++++++++ flopy/mf6/mfmodel.py | 87 +++- flopy/mf6/mfpackage.py | 10 +- 5 files changed, 615 insertions(+), 341 deletions(-) rename 
.docs/Notebooks/{netcdf01_tutorial.py => mf6_netcdf01_tutorial.py} (71%)
 delete mode 100644 .docs/Notebooks/netcdf02_tutorial.py
 create mode 100644 autotest/regression/test_model_netcdf.py

diff --git a/.docs/Notebooks/netcdf01_tutorial.py b/.docs/Notebooks/mf6_netcdf01_tutorial.py
similarity index 71%
rename from .docs/Notebooks/netcdf01_tutorial.py
rename to .docs/Notebooks/mf6_netcdf01_tutorial.py
index 8c2f50dba7..30a8bc1f29 100644
--- a/.docs/Notebooks/netcdf01_tutorial.py
+++ b/.docs/Notebooks/mf6_netcdf01_tutorial.py
@@ -7,7 +7,7 @@
 #       extension: .py
 #       format_name: light
 #       format_version: '1.5'
-#       jupytext_version: 1.17.2
+#       jupytext_version: 1.16.4
 #   kernelspec:
 #     display_name: Python 3 (ipykernel)
 #     language: python
@@ -18,13 +18,15 @@
 #
 # ## NetCDF tutorial 1: MODFLOW 6 structured input file
 #
-# This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file from
-# an existing FloPy simulation. In the tutorial, candidate array data is
-# added to an xarray dataset and annotated so that the generated NetCDF
-# file can be read by MODFLOW 6 as model input.
+# This tutorial shows how to generate a MODFLOW 6 NetCDF file from
+# an existing FloPy simulation. Two methods are demonstrated that
+# generate a simulation with package data stored in a model NetCDF
+# file. The first method is non-interactive: FloPy will generate the
+# file with a modified `write_simulation()` call. The second method
+# is interactive, which provides an opportunity to modify the dataset
+# before it is written to NetCDF.
 #
-# This tutorial generates a structured NetCDF variant - for more information
-# on supported MODFLOW 6 NetCDF formats see:
+# For more information on supported MODFLOW 6 NetCDF formats see:
 # [MODFLOW NetCDF Format](https://github.com/MODFLOW-ORG/modflow6/wiki/MODFLOW-NetCDF-Format).
 #
 # Note that NetCDF is only supported by the Extended version of MODFLOW 6.
@@ -75,13 +77,13 @@
     "uzf01.uzf.obs": None,
 }
 
-# for fname, fhash in file_names.items():
-#     pooch.retrieve(
-#         url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
-#         fname=fname,
-#         path=data_path / sim_name,
-#         known_hash=fhash,
-#     )
+for fname, fhash in file_names.items():
+    pooch.retrieve(
+        url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
+        fname=fname,
+        path=data_path / sim_name,
+        known_hash=fhash,
+    )
 
 # ## Create simulation workspace
 
@@ -104,7 +106,44 @@
 success, buff = sim.run_simulation(silent=True, report=True)
 assert success, pformat(buff)
 
-# ## Create NetCDF based simulation
+# ## Create NetCDF based simulation method 1
+#
+# This is the most straightforward way to create a NetCDF simulation
+# from the loaded ASCII input simulation. Simply set the `netcdf`
+# argument of `write_simulation()` to either `structured` or
+# `layered`, depending on the desired format of the generated NetCDF
+# file.
+#
+# The name of the created file can be specified by first setting the
+# model `name_file.nc_filerecord` attribute to the desired name. If
+# this step is not taken, the default name of `{model_name}.input.nc`
+# is used.
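The blocks below write the same simulation twice, once per format. A file
produced by either call can be sanity-checked before handing it to MODFLOW 6.
The following sketch assumes the xarray/netCDF4 stack used throughout this
patch is installed and that the structured variant has already been written
to the "netcdf1" workspace; the attribute names are the same ones the
regression tests added later in this patch assert on:

    import xarray as xr

    nc = xr.open_dataset(
        workspace / "netcdf1" / "uzf01.structured.nc", engine="netcdf4"
    )
    # model input files carry modflow_* global attributes
    print(nc.attrs["modflow_grid"], nc.attrs["modflow_model"])
    # each exported array is tagged with the MODFLOW 6 input it feeds
    print(nc["dis_delr"].attrs["modflow_input"])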
+
+# create directory for netcdf sim
+sim.set_sim_path(workspace / "netcdf1")
+# set model name file nc_filerecord attribute to export name
+gwf = sim.get_model("uzf01")
+gwf.name_file.nc_filerecord = "uzf01.structured.nc"
+# write simulation with structured NetCDF file
+sim.write_simulation(netcdf="structured")
+
+# success, buff = sim.run_simulation(silent=True, report=True)
+# assert success, pformat(buff)
+
+# ## Repeat method 1 with layered mesh NetCDF format
+
+# create directory for netcdf sim
+sim.set_sim_path(workspace / "netcdf2")
+# set model name file nc_filerecord attribute to export name
+gwf = sim.get_model("uzf01")
+gwf.name_file.nc_filerecord = "uzf01.layered.nc"
+# write simulation with layered mesh NetCDF file
+sim.write_simulation(netcdf="layered")
+
+# success, buff = sim.run_simulation(silent=True, report=True)
+# assert success, pformat(buff)
+
+# ## Create NetCDF based simulation method 2
 #
 # Reset the simulation path and set the `GWF` name file `nc_filerecord`
 # attribute to the name of the intended input NetCDF file. Display
 # the resultant name file changes.
 #
 # When we write the updated simulation, all packages that support NetCDF
 # input parameters will be converted. We will therefore need to create a
 # NetCDF input file containing arrays for the `DIS`, `NPF`, `IC`, `STO`,
 # and `GHBG` packages. Data will be copied from the package objects into
 # dataset arrays.
 #
-# Flopy does not currently generate the NetCDF input file. This tutorial
-# shows one way that can be accomplished.
+# Flopy will not generate the NetCDF input file when the `netcdf` argument
+# to `write_simulation()` is set to `nofile`. This step is needed, however,
+# to update ASCII input with the keywords required to support the model
+# NetCDF file that we will generate.
 
 # create directory for netcdf sim
-sim.set_sim_path(workspace / "netcdf")
+sim.set_sim_path(workspace / "netcdf3")
 # set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf01")
 gwf.name_file.nc_filerecord = "uzf01.structured.nc"
 # write simulation with ASCII inputs tagged for NetCDF
-sim.write_simulation(netcdf=True)
+# but do not create NetCDF file
+sim.write_simulation(netcdf="nofile")
+
+# ## Show name file with NetCDF input configured
+
 # show name file with NetCDF input configured
-with open(workspace / "netcdf" / "uzf01.nam", "r") as fh:
+with open(workspace / "netcdf3" / "uzf01.nam", "r") as fh:
     print(fh.read())
+
+# ## Show example package file with NetCDF keywords
+
 # show example package file with NetCDF input configured
-with open(workspace / "netcdf" / "uzf01.ic", "r") as fh:
+with open(workspace / "netcdf3" / "uzf01.ic", "r") as fh:
     print(fh.read())
 
 # ## Create dataset
@@ -184,14 +232,15 @@
 # existing simulation objects and update the dataset.
 #
 # Default dataset variable names are defined in the package `netcdf_info()`
-# dictionary.
+# dictionary. Here we will use the info dictionary to programmatically update
+# the dataset; for the remaining packages we will hardcode the variable names
+# being updated for maximum clarity.
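The loop that follows uses two pieces of `netcdf_info()` metadata: each
entry's `varname` names the dataset variable, and the last component of its
`modflow_input` attribute names the FloPy package attribute that holds the
data. The regression tests added later in this patch wrap exactly this
pattern in a helper; a sketch (renamed here so it does not shadow the
package `update_dataset()` method):

    def copy_package_arrays(dataset, pobj):
        # copy each NetCDF-supported array from a FloPy package object
        # into the dataset, driven by its netcdf_info() metadata
        nc_info = pobj.netcdf_info()
        for v in nc_info:
            name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower()
            d = getattr(pobj, name)
            dataset[nc_info[v]["varname"]].values = d.get_data()

    copy_package_arrays(ds, dis)  # equivalent to the explicit loop below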
 
-# update dataset from dis arrays
-ds["dis_delr"].values = dis.delr.get_data()
-ds["dis_delc"].values = dis.delc.get_data()
-ds["dis_top"].values = dis.top.get_data()
-ds["dis_botm"].values = dis.botm.get_data()
-ds["dis_idomain"].values = dis.idomain.get_data()
+nc_info = dis.netcdf_info()
+for v in nc_info:
+    name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower()
+    d = getattr(dis, name)
+    ds[nc_info[v]["varname"]].values = d.get_data()
 
 # ## Access `NPF` package NetCDF attributes
 #
@@ -277,7 +326,7 @@
 
 # write dataset to netcdf
 ds.to_netcdf(
-    workspace / "netcdf/uzf01.structured.nc", format="NETCDF4", engine="netcdf4"
+    workspace / "netcdf3" / "uzf01.structured.nc", format="NETCDF4", engine="netcdf4"
 )
 
 # ## Run MODFLOW 6 simulation with NetCDF input

diff --git a/.docs/Notebooks/netcdf02_tutorial.py b/.docs/Notebooks/netcdf02_tutorial.py
deleted file mode 100644
index 12ea701705..0000000000
--- a/.docs/Notebooks/netcdf02_tutorial.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:light
-#     text_representation:
-#       extension: .py
-#       format_name: light
-#       format_version: '1.5'
-#       jupytext_version: 1.17.2
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim
-#
-# ## NetCDF tutorial 2: MODFLOW 6 UGRID layered mesh input file
-#
-# This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file from
-# an existing FloPy simulation. In the tutorial, candidate array data is
-# added to an xarray dataset and annotated so that the generated NetCDF
-# file can be read by MODFLOW 6 as model input.
-#
-# This tutorial generates a UGRID layered mesh NetCDF variant - for more
-# information on supported MODFLOW 6 NetCDF formats see:
-# [MODFLOW NetCDF Format](https://github.com/MODFLOW-ORG/modflow6/wiki/MODFLOW-NetCDF-Format).
-#
-# Note that NetCDF is only supported by the Extended version of MODFLOW 6.
-# A nightly windows build of Extended MODFLOW 6 is available from
-# [nightly build](https://github.com/MODFLOW-ORG/modflow6-nightly-build).
-
-# package import
-import sys
-from pathlib import Path
-from pprint import pformat, pprint
-from tempfile import TemporaryDirectory
-
-import git
-import numpy as np
-import pooch
-import xarray as xr
-
-import flopy
-
-print(sys.version)
-print(f"flopy version: {flopy.__version__}")
-
-sim_name = "uzf02"
-
-# Check if we are in the repository and define the data path.
-
-try:
-    root = Path(git.Repo(".", search_parent_directories=True).working_dir)
-except:
-    root = None
-
-data_path = root / "examples" / "data" / "mf6" / "netcdf" if root else Path.cwd()
-
-file_names = {
-    "mfsim.nam": None,
-    "uzf02.disv": None,
-    "uzf02.ghbg": None,
-    "uzf02.ic": None,
-    "uzf02.ims": None,
-    "uzf02.nam": None,
-    "uzf02.npf": None,
-    "uzf02.obs": None,
-    "uzf02.oc": None,
-    "uzf02.sto": None,
-    "uzf02.tdis": None,
-    "uzf02.uzf": None,
-    "uzf02.uzf.obs": None,
-    "uzf02.uzfobs": None,
-}
-
-for fname, fhash in file_names.items():
-    pooch.retrieve(
-        url=f"https://github.com/modflowpy/flopy/raw/develop/examples/data/{sim_name}/{fname}",
-        fname=fname,
-        path=data_path / sim_name,
-        known_hash=fhash,
-    )
-
-# ## Create simulation workspace
-
-# create temporary directories
-temp_dir = TemporaryDirectory()
-workspace = Path(temp_dir.name)
-
-# ## Load and run baseline simulation
-#
-# For the purposes of this tutorial, the specifics of this simulation
-# other than it is a candidate for NetCDF input are not a focus. It
-# is a NetCDF input candidate because it defines a supported model type
-# (`GWF6`) with a vertex discretization and packages that support
-# NetCDF input parameters. Vertex (`DISV`) discretizations are only
-# supported by the `UGRID layered mesh` NetCDF format and as such, the
-# `mesh` attribute will be set to `layered` when passed to FloPy functions
-# in this tutorial.
-
-# load and run the non-netcdf simulation
-sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
-# sim = flopy.mf6.MFSimulation.load(sim_ws=Path("./netcdf02"))
-sim.set_sim_path(workspace)
-sim.write_simulation()
-success, buff = sim.run_simulation(silent=True, report=True)
-assert success, pformat(buff)
-
-# ## Create NetCDF based simulation
-#
-# Reset the simulation path and set the `GWF` name file `nc_filerecord`
-# attribute to the name of the intended input NetCDF file. Display
-# the resultant name file changes.
-#
-# When we write the updated simulation, all packages that support NetCDF
-# input parameters will be converted. We will therefore need to create a
-# NetCDF input file containing arrays for the `DIS`, `NPF`, `IC`, `STO`,
-# and `GHBG` packages. Data will be copied from the package objects into
-# dataset arrays.
-#
-# Flopy does not currently generate the NetCDF input file. This tutorial
-# shows one way that can be accomplished.
-
-# create directory for netcdf sim
-sim.set_sim_path(workspace / "netcdf")
-# set model name file nc_filerecord attribute to export name
-gwf = sim.get_model("uzf02")
-gwf.name_file.nc_filerecord = "uzf02.layered.nc"
-# write simulation with ASCII inputs tagged for NetCDF
-sim.write_simulation(netcdf=True)
-# show name file with NetCDF input configured
-with open(workspace / "netcdf" / "uzf02.nam", "r") as fh:
-    print(fh.read())
-# show example package file with NetCDF input configured
-with open(workspace / "netcdf" / "uzf02.ic", "r") as fh:
-    print(fh.read())
-
-# ## Create dataset
-#
-# Create the base xarray dataset from the modelgrid object. This
-# will add required dimensions and coordinate variables to the
-# dataset according to the grid specification. Modeltime is needed
-# for timeseries support.
-
-# create the dataset
-ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime, mesh="layered")
-
-# ## Access model NetCDF attributes
-#
-# Access model scoped NetCDF details by storing the dictionary
-# returned from `netcdf_info()`. In particular, we need to set dataset
-# scoped attributes that are stored in the model netcdf info dict.
-#
-# First, retrieve and store the netcdf info dictionary and display
-# its contents. Then, in the following step, update the dataset with
-# the model scoped attributes defined in the dictionary.
-#
-# These 2 operations can also be accomplished by calling `update_dataset()`
-# on the model object. Analogous functions for the package are shown
-# below.
-
-# get model netcdf info
-nc_info = gwf.netcdf_info(mesh="layered")
-pprint(nc_info)
-
-# update dataset directly with required attributes
-for a in nc_info["attrs"]:
-    ds.attrs[a] = nc_info["attrs"][a]
-
-# ## Update the dataset with supported `DIS` arrays
-#
-# Add NetCDF supported data arrays in package to dataset. Internally, this call
-# uses a `netcdf_info()` package dictionary to determine candidate variables
-# and relevant information about them. Alternatively, this dictionary can
-# be directly accessed, updated, and passed to the `update_dataset()` function.
-# That workflow will be demonstrated in the `NPF` package update which follows.
-
-# update dataset with `DIS` arrays
-disv = gwf.get_package("disv")
-ds = disv.update_dataset(ds, mesh="layered")
-
-# ## Update array data
-#
-# We have created dataset array variables for the package but they do not yet
-# define the expected input data for MODFLOW 6. We will take advantage of the
-# existing simulation objects and update the dataset.
-#
-# Default dataset variable names are defined in the package `netcdf_info()`
-# dictionary.
-
-# update dataset from dis arrays
-ds["disv_top"].values = disv.top.get_data()
-for l in range(gwf.modelgrid.nlay):
-    ds[f"disv_botm_l{l + 1}"].values = disv.botm.get_data()[l]
-
-# ## Access `NPF` package NetCDF attributes
-#
-# Access package scoped NetCDF details by storing the dictionary returned
-# from `netcdf_info()`. We need to set package variable attributes that are
-# stored in the package netcdf info dict, but we also need other information
-# that is relevant to creating the variables themselves.
-#
-# The contents of the info dictionary are shown and then, in the following
-# step, the dictionary and the dataset are passed to a helper routine that
-# create the intended array variables.
-
-# get npf package netcdf info
-npf = gwf.get_package("npf")
-nc_info = npf.netcdf_info(mesh="layered")
-pprint(nc_info)
-
-# ## Update package `netcdf_info` dictionary and dataset
-#
-# Here we update the `NPF K` layer 1 input parameter to add the
-# `standard_name` attribute to it's attribute dictionary. The dictionary
-# is then passed to the `update_dataset()` function. Note the updated name
-# is used in the subsequent block when updating the array values.
-
-# update dataset with `NPF` arrays
-nc_info["k/layer1"]["attrs"]["standard_name"] = (
-    "soil_hydraulic_conductivity_at_saturation"
-)
-ds = npf.update_dataset(ds, netcdf_info=nc_info, mesh="layered")
-
-# ## Update `NPF` array data
-
-# update dataset from npf arrays
-for l in range(gwf.modelgrid.nlay):
-    ds[f"npf_icelltype_l{l + 1}"].values = npf.icelltype.get_data()[l]
-    ds[f"npf_k_l{l + 1}"].values = npf.k.get_data()[l]
-    ds[f"npf_k33_l{l + 1}"].values = npf.k33.get_data()[l]
-
-# ## Show dataset `NPF K` parameter with updates
-
-# print dataset npf k variable
-print(ds["npf_k_l1"])
-
-# ## Update the dataset with supported `IC` arrays
-
-# ic
-ic = gwf.get_package("ic")
-ds = ic.update_dataset(ds, mesh="layered")
-for l in range(gwf.modelgrid.nlay):
-    ds[f"ic_strt_l{l + 1}"].values = ic.strt.get_data()[l]
-
-# ## Update the dataset with supported `STO` arrays
-
-# storage
-sto = gwf.get_package("sto")
-ds = sto.update_dataset(ds, mesh="layered")
-for l in range(gwf.modelgrid.nlay):
-    ds[f"sto_iconvert_l{l + 1}"].values = sto.iconvert.get_data()[l]
-    ds[f"sto_sy_l{l + 1}"].values = sto.sy.get_data()[l]
-    ds[f"sto_ss_l{l + 1}"].values = sto.ss.get_data()[l]
-
-# ## Update the dataset with supported `GHBG` arrays
-
-# update dataset with 'GHBG' arrays
-ghbg = gwf.get_package("ghbg_0")
-ds = ghbg.update_dataset(ds, mesh="layered")
-
-# ## Update `GHBG` array data
-
-# update bhead netcdf array from flopy perioddata
-# timeseries step index is first of stress period
-for p in ghbg.bhead.get_data():
-    if ghbg.bhead.get_data()[p] is not None:
-        istp = sum(gwf.modeltime.nstp[0:p])
-        for l in range(gwf.modelgrid.nlay):
-            ds[f"ghbg_0_bhead_l{l + 1}"].values[istp] = ghbg.bhead.get_data()[p][l]
-
-# update cond netcdf array from flopy perioddata
-# timeseries step index is first of stress period
-for p in ghbg.cond.get_data():
-    if ghbg.cond.get_data()[p] is not None:
-        istp = sum(gwf.modeltime.nstp[0:p])
-        for l in range(gwf.modelgrid.nlay):
-            ds[f"ghbg_0_cond_l{l + 1}"].values[istp] = ghbg.cond.get_data()[p][l]
-
-# ## Display generated dataset
-
-# show the dataset
-print(ds)
-
-# ## Export generated dataset to NetCDF
-
-# write dataset to netcdf
-ds.to_netcdf(workspace / "netcdf/uzf02.layered.nc", format="NETCDF4", engine="netcdf4")
-
-# ## Run MODFLOW 6 simulation with NetCDF input
-#
-# The simulation generated by this tutorial should be runnable by
-# Extended MODFLOW 6, available from the nightly-build repository
-# (linked above).
-
-# success, buff = sim.run_simulation(silent=True, report=True)
-# assert success, pformat(buff)
diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py
new file mode 100644
index 0000000000..9a2dc7da66
--- /dev/null
+++ b/autotest/regression/test_model_netcdf.py
@@ -0,0 +1,453 @@
+import os
+import shutil
+from pprint import pformat, pprint
+
+import numpy as np
+import pytest
+import xarray as xr
+from modflow_devtools.markers import requires_exe, requires_pkg
+from pyproj import CRS
+
+import flopy
+from flopy.discretization.structuredgrid import StructuredGrid
+from flopy.discretization.vertexgrid import VertexGrid
+from flopy.utils.gridutil import get_disv_kwargs
+
+
+def check_netcdf(path, mesh=None):
+    """Check for functional equivalence"""
+    ds = xr.open_dataset(path, engine="netcdf4")
+    packages = [
+        "dis",
+        "npf",
+        "ic",
+        "sto",
+        "ghbg_0",
+    ]
+
+    # global attributes
+    assert "modflow_grid" in ds.attrs
+    assert "modflow_model" in ds.attrs
+    if mesh is None:
+        assert "mesh" not in ds.attrs
+    else:
+        assert "mesh" in ds.attrs
+    for a in ds.attrs:
+        pass
+
+    # coordinates
+    for coordname, da in ds.coords.items():
+        pass
+
+    # variables
+    for varname, da in ds.data_vars.items():
+        if mesh is None:
+            p = varname.rsplit("_", 1)[0]
+        else:
+            p = varname.rsplit("_", 2)[0]
+
+        if p in packages:
+            assert "modflow_input" in da.attrs
+            if mesh is None:
+                assert "layer" not in da.attrs
+            else:
+                lstr = varname.rsplit("_", 1)[1]
+                if lstr[0] == "l":
+                    assert "layer" in da.attrs
+
+
+def update_dataset(dataset, pobj):
+    nc_info = pobj.netcdf_info()
+    for v in nc_info:
+        name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower()
+        d = getattr(pobj, name)
+        dataset[nc_info[v]["varname"]].values = d.get_data()
+
+
+@pytest.mark.regression
+def test_uzf01_model_scope_nofile(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "nofile"
+    fname = f"{sim_name}.structured.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    assert not (ws / fname).exists()
+
+
+@pytest.mark.regression
+def test_uzf02_model_scope_nofile(function_tmpdir, example_data_path):
+    sim_name = "uzf02"
+    netcdf = "nofile"
+    fname = f"{sim_name}.input.nc"  # default
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    assert not (ws / fname).exists()
+
+
+@pytest.mark.regression
+def test_uzf01_sim_scope_nomesh(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "structured"
+    fname = f"{sim_name}.input.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    check_netcdf(ws / fname)
+
+
+@pytest.mark.regression
+def test_uzf01_sim_scope_mesh(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "layered"
+    fname = f"{sim_name}.input.nc"  # default
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    check_netcdf(ws / fname, mesh=netcdf)
+
+
+@pytest.mark.regression
+def test_uzf01_sim_scope_fname(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "structured"
+    fname = f"{sim_name}.layered.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # update write fname
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    check_netcdf(ws / fname)
+
+
+@pytest.mark.regression
+def test_uzf02_sim_scope(function_tmpdir, example_data_path):
+    sim_name = "uzf02"
+    netcdf = "layered"
+    fname = f"{sim_name}.input.nc"  # default
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    check_netcdf(ws / fname, mesh=netcdf)
+
+
+@pytest.mark.regression
+def test_uzf02_sim_scope_fname(function_tmpdir, example_data_path):
+    sim_name = "uzf02"
+    netcdf = "layered"
+    fname = f"{sim_name}.layered.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # update write fname
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    sim.write_simulation(netcdf=netcdf)
+
+    check_netcdf(ws / fname, mesh=netcdf)
+
+
+@pytest.mark.regression
+def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "nofile"
+    fname = f"{sim_name}.structured.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    # create dataset
+    ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
+    ds = gwf.update_dataset(ds)
+
+    # write dataset to netcdf
+    ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4")
+
+    check_netcdf(ws / fname)
+
+
+def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "nofile"
+    mesh = "layered"
+    fname = f"{sim_name}.layered.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    # create dataset
+    ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime, mesh=mesh)
+    ds = gwf.update_dataset(ds, mesh=mesh)
+
+    # write dataset to netcdf
+    ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4")
+
+    check_netcdf(ws / fname, mesh=mesh)
+
+
+@pytest.mark.regression
+def test_uzf02_model_scope(function_tmpdir, example_data_path):
+    sim_name = "uzf02"
+    netcdf = "nofile"
+    mesh = "layered"
+    fname = f"{sim_name}.layered.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    # create dataset
+    ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime, mesh=mesh)
+    ds = gwf.update_dataset(ds, mesh=mesh)
+
+    # write dataset to netcdf
+    ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4")
+
+    check_netcdf(ws / fname, mesh=mesh)
+
+
+@pytest.mark.regression
+def test_uzf01_pkg_scope(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    fname = f"{sim_name}.structured.nc"
+    netcdf = "structured"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    # create dataset
+    ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
+
+    # get model netcdf info
+    nc_info = gwf.netcdf_info()
+
+    # update dataset directly with required attributes
+    for a in nc_info["attrs"]:
+        ds.attrs[a] = nc_info["attrs"][a]
+
+    # add all packages and update data
+    for p in gwf.packagelist:
+        ds = p.update_dataset(ds)
+        nc_info = p.netcdf_info()
+        for v in nc_info:
+            name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower()
+            d = getattr(p, name)
+            if d.repeating:
+                for per in d.get_data():
+                    istp = sum(gwf.modeltime.nstp[0:per])
+                    ds[nc_info[v]["varname"]].values[istp] = d.get_data()[per]
+            else:
+                ds[nc_info[v]["varname"]].values = d.get_data()
+
+    # write dataset to netcdf
+    ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4")
+
+    check_netcdf(ws / fname)
+
+
+@pytest.mark.regression
+def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path):
+    sim_name = "uzf01"
+    netcdf = "structured"
+    fname = f"{sim_name}.structured.nc"
+    data_path_base = example_data_path / "mf6" / "netcdf"
+    ws = function_tmpdir / sim_name
+    base_path = data_path_base / sim_name
+
+    # load example
+    sim = flopy.mf6.MFSimulation.load(sim_ws=base_path)
+
+    # set simulation path and write simulation
+    sim.set_sim_path(ws)
+    gwf = sim.get_model(sim_name)
+    gwf.name_file.nc_filerecord = fname
+    sim.write_simulation(netcdf=netcdf)
+
+    # create dataset
+    ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
+
+    # get model netcdf info
+    nc_info = gwf.netcdf_info()
+
+    # update dataset directly with required attributes
+    for a in nc_info["attrs"]:
+        ds.attrs[a] = nc_info["attrs"][a]
+
+    # update dataset with `DIS` arrays
+    dis = gwf.get_package("dis")
+    ds = dis.update_dataset(ds)
+    update_dataset(ds, dis)
+
+    # get npf package netcdf info
+    npf = gwf.get_package("npf")
+    nc_info = npf.netcdf_info()
+
+    # update dataset with `NPF` arrays
+    # change k varname and add attribute
+
nc_info["k"]["varname"] = "npf_k_updated" + nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" + ds = npf.update_dataset(ds, netcdf_info=nc_info) + + # update dataset from npf arrays + ds["npf_icelltype"].values = npf.icelltype.get_data() + ds["npf_k_updated"].values = npf.k.get_data() + + # ic + ic = gwf.get_package("ic") + ds = ic.update_dataset(ds) + update_dataset(ds, ic) + + # storage + sto = gwf.get_package("sto") + ds = sto.update_dataset(ds) + update_dataset(ds, sto) + + # update dataset with 'GHBG' arrays + ghbg = gwf.get_package("ghbg_0") + ds = ghbg.update_dataset(ds) + + # update bhead netcdf array from flopy perioddata + # timeseries step index is first of stress period + for p in ghbg.bhead.get_data(): + istp = sum(gwf.modeltime.nstp[0:p]) + ds["ghbg_0_bhead"].values[istp] = ghbg.bhead.get_data()[p] + + # update cond netcdf array from flopy perioddata + # timeseries step index is first of stress period + for p in ghbg.cond.get_data(): + istp = sum(gwf.modeltime.nstp[0:p]) + ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] + + # write dataset to netcdf + ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") + + check_netcdf(ws / fname) + assert ( + ds["npf_k_updated"].attrs["standard_name"] + == "soil_hydraulic_conductivity_at_saturation" + ) + + +@pytest.mark.regression +def test_uzf01_cycle(function_tmpdir, example_data_path): + sim_name = "uzf01" + netcdf = "structured" + fname = f"{sim_name}.input.nc" # default + data_path_base = example_data_path / "mf6" / "netcdf" + ws = function_tmpdir / sim_name + base_path = data_path_base / sim_name + + # load example + sim = flopy.mf6.MFSimulation.load(sim_ws=base_path) + + # set simulation path and write simulation + sim.set_sim_path(ws) + sim.write_simulation(netcdf=netcdf) + + check_netcdf(ws / fname) + + # set simulation path and rewrite base simulation + sim.set_sim_path(ws / "mf6") + # gwf = sim.get_model(sim_name) + # gwf.name_file.nc_filerecord = None + sim.write_simulation() + + assert not (ws / "mf6" / fname).exists() + + success, buff = sim.run_simulation(silent=True, report=True) + assert success, pformat(buff) diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index f2b783cacb..4055a82ce8 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1330,6 +1330,19 @@ def write( 'mesh2d' and 'structured' are supported types. 
""" + write_netcdf = netcdf and ( + self.model_type == "gwf6" + or self.model_type == "gwt6" + or self.model_type == "gwe6" + ) + + if write_netcdf: + nc_fname = None + if self.name_file.nc_filerecord.get_data() is None: + # update name file for netcdf input + nc_fname = f"{self.name}.input.nc" + self.name_file.nc_filerecord = nc_fname + # write name file if ( self.simulation_data.verbosity_level.value @@ -1346,12 +1359,6 @@ def write( self.simulation_data.max_columns_user_set = False self.simulation_data.max_columns_auto_set = True - write_netcdf = netcdf and ( - self.model_type == "gwf6" - or self.model_type == "gwt6" - or self.model_type == "gwe6" - ) - # write packages for pp in self.packagelist: if write_netcdf: @@ -1365,9 +1372,34 @@ def write( print(f" writing package {pp._get_pname()}...") pp.write(ext_file_action=ext_file_action) - if write_netcdf: - # reset data storage - pp._set_netcdf_storage(reset=True) + # reset data storage + pp._set_netcdf_storage(reset=True) + + # write netcdf file + if write_netcdf and netcdf.lower() != "nofile": + mesh = netcdf + if mesh.upper() == "STRUCTURED": + mesh = None + + ds = self.modelgrid.dataset( + modeltime=self.modeltime, + mesh=mesh, + ) + + ds = self.update_dataset(ds, mesh=mesh) + + # write dataset to netcdf + fname = self.name_file.nc_filerecord.get_data()[0][0] + ds.to_netcdf( + os.path.join(self.model_ws, fname), + format="NETCDF4", + engine="netcdf4" + ) + + if nc_fname is not None: + self.name_file.nc_filerecord = None + + def get_grid_type(self): """ @@ -2287,4 +2319,41 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): for a in nc_info["attrs"]: dataset.attrs[a] = nc_info["attrs"][a] + # add all packages and update data + for p in self.packagelist: + # add package var to dataset + dataset = p.update_dataset(dataset, mesh=mesh) + + # update dataset var values + nc_info = p.netcdf_info(mesh=mesh) + for v in nc_info: + name = nc_info[v]["attrs"]["modflow_input"].split("/")[2].lower() + if mesh == None: + #name = nc_info[v]["varname"].rsplit("_", 1)[1] + d = getattr(p, name) + if d.repeating: + for per in d.get_data(): + istp = sum(self.modeltime.nstp[0:per]) + dataset[nc_info[v]["varname"]].values[istp] = d.get_data()[per] + else: + dataset[nc_info[v]["varname"]].values = d.get_data() + elif mesh.upper() == "LAYERED": + if "layer" in nc_info[v]["attrs"]: + #name = nc_info[v]["varname"].rsplit("_", 2)[1] + layer = nc_info[v]["attrs"]["layer"] - 1 + else: + #name = nc_info[v]["varname"].rsplit("_", 1)[1] + layer = -1 + d = getattr(p, name) + if d.repeating: + for per in d.get_data(): + if d.get_data()[per] is not None: + istp = sum(self.modeltime.nstp[0:per]) + dataset[nc_info[v]["varname"]].values[istp] = d.get_data()[per][layer] + else: + if layer >= 0: + dataset[nc_info[v]["varname"]].values = d.get_data()[layer].flatten() + else: + dataset[nc_info[v]["varname"]].values = d.get_data().flatten() + return dataset diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index bcebfb752c..22481c3374 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3638,6 +3638,7 @@ def netcdf_info(self, mesh=None): return attrs def update_dataset(self, dataset, netcdf_info=None, mesh=None): + from ..discretization.vertexgrid import VertexGrid if netcdf_info is None: nc_info = self.netcdf_info(mesh=mesh) else: @@ -3646,17 +3647,18 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): modelgrid = self.model_or_sim.modelgrid modeltime = self.model_or_sim.modeltime - if mesh is None: + if 
isinstance(modelgrid, VertexGrid):
             dimmap = {
                 "time": sum(modeltime.nstp),
                 "z": modelgrid.nlay,
-                "y": modelgrid.nrow,
-                "x": modelgrid.ncol,
+                "nmesh_face": modelgrid.ncpl,
             }
-        elif mesh.upper() == "LAYERED":
+        else:
             dimmap = {
                 "time": sum(modeltime.nstp),
                 "z": modelgrid.nlay,
+                "y": modelgrid.nrow,
+                "x": modelgrid.ncol,
                 "nmesh_face": modelgrid.ncpl,
             }

From bdb4a88af94667d53b3125fa4c7fec4845e83f07 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Fri, 15 Aug 2025 14:49:32 -0400
Subject: [PATCH 17/44] mark test regression

---
 autotest/regression/test_model_netcdf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py
index 9a2dc7da66..083cace6a6 100644
--- a/autotest/regression/test_model_netcdf.py
+++ b/autotest/regression/test_model_netcdf.py
@@ -234,7 +234,7 @@ def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path):
 
     check_netcdf(ws / fname)
 
-
+@pytest.mark.regression
 def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path):
     sim_name = "uzf01"
     netcdf = "nofile"

From e63b739b44928b878e1788a66a73faaa36f7b87c Mon Sep 17 00:00:00 2001
From: mjreno
Date: Fri, 15 Aug 2025 14:54:11 -0400
Subject: [PATCH 18/44] format

---
 autotest/regression/test_model_netcdf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py
index 083cace6a6..815d94b4ba 100644
--- a/autotest/regression/test_model_netcdf.py
+++ b/autotest/regression/test_model_netcdf.py
@@ -234,6 +234,7 @@ def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path):
 
     check_netcdf(ws / fname)
 
+
 @pytest.mark.regression
 def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path):
     sim_name = "uzf01"

From 086f2a90f3c8ef755a06ab454265aadd388a3ace Mon Sep 17 00:00:00 2001
From: mjreno
Date: Fri, 15 Aug 2025 16:17:19 -0400
Subject: [PATCH 19/44] add xarray to pyproject.toml

---
 flopy/discretization/structuredgrid.py | 3 ++-
 flopy/discretization/vertexgrid.py     | 3 ++-
 pyproject.toml                         | 1 +
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py
index 001137190b..24f41178c3 100644
--- a/flopy/discretization/structuredgrid.py
+++ b/flopy/discretization/structuredgrid.py
@@ -1771,7 +1771,8 @@ def get_plottable_layer_array(self, a, layer):
         return plotarray
 
     def dataset(self, modeltime=None, mesh=None):
-        import xarray as xr
+        from ..utils import import_optional_dependency
+        xr = import_optional_dependency("xarray")
 
         if modeltime is None:
             raise ValueError("modeltime required for dataset timeseries")
diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py
index 2e43e36d81..2d44b56d5d 100644
--- a/flopy/discretization/vertexgrid.py
+++ b/flopy/discretization/vertexgrid.py
@@ -601,7 +601,8 @@ def get_plottable_layer_array(self, a, layer):
         return plotarray
 
     def dataset(self, modeltime=None, mesh=None):
-        import xarray as xr
+        from ..utils import import_optional_dependency
+        xr = import_optional_dependency("xarray")
 
         FILLNA_INT32 = np.int32(-2147483647)
         FILLNA_DBL = 9.96920996838687e36
diff --git a/pyproject.toml b/pyproject.toml
index be5c8da9ab..26a3ae14d7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -85,6 +85,7 @@ optional = [
     "xmipy",
     "h5py",
+    "xarray",
     "scikit-learn"
 ]
 doc = [
     "flopy[optional]",

From 5354b953a4508f9241d8364242a946f1e23e80a3 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Fri, 15 Aug 2025 16:20:58 -0400
Subject: 
[PATCH 20/44] format --- flopy/discretization/structuredgrid.py | 1 + flopy/discretization/vertexgrid.py | 1 + 2 files changed, 2 insertions(+) diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 24f41178c3..30006e0c0a 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1772,6 +1772,7 @@ def get_plottable_layer_array(self, a, layer): def dataset(self, modeltime=None, mesh=None): from ..utils import import_optional_dependency + xr = import_optional_dependency("xarray") if modeltime is None: diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 2d44b56d5d..a3aadd05dc 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -602,6 +602,7 @@ def get_plottable_layer_array(self, a, layer): def dataset(self, modeltime=None, mesh=None): from ..utils import import_optional_dependency + xr = import_optional_dependency("xarray") FILLNA_INT32 = np.int32(-2147483647) From f089e8cf0664734a20f1c4e980f032ea021fad59 Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 15 Aug 2025 16:40:40 -0400 Subject: [PATCH 21/44] temporarily suspend run --- autotest/regression/test_model_netcdf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py index 815d94b4ba..dd88d222b2 100644 --- a/autotest/regression/test_model_netcdf.py +++ b/autotest/regression/test_model_netcdf.py @@ -450,5 +450,5 @@ def test_uzf01_cycle(function_tmpdir, example_data_path): assert not (ws / "mf6" / fname).exists() - success, buff = sim.run_simulation(silent=True, report=True) - assert success, pformat(buff) + # success, buff = sim.run_simulation(silent=True, report=True) + # assert success, pformat(buff) From f320465a81a98ae9389445d7d65617b291a817d4 Mon Sep 17 00:00:00 2001 From: mjreno Date: Sat, 16 Aug 2025 15:56:52 -0400 Subject: [PATCH 22/44] data tests, add longname --- autotest/regression/test_model_netcdf.py | 53 +++++++++++++++++------- flopy/discretization/grid.py | 7 ++-- flopy/discretization/structuredgrid.py | 16 +++++++ flopy/discretization/vertexgrid.py | 17 ++++++++ flopy/mf6/mfmodel.py | 5 +-- flopy/mf6/mfpackage.py | 47 +++++++++++---------- flopy/mf6/mfsimbase.py | 2 +- flopy/mf6/utils/codegen/__init__.py | 1 + flopy/mf6/utils/codegen/filters.py | 6 +++ 9 files changed, 110 insertions(+), 44 deletions(-) diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py index dd88d222b2..d84a7a1b7a 100644 --- a/autotest/regression/test_model_netcdf.py +++ b/autotest/regression/test_model_netcdf.py @@ -14,7 +14,7 @@ from flopy.utils.gridutil import get_disv_kwargs -def check_netcdf(path, mesh=None): +def check_netcdf(path, mobj, mesh=None): """Check for functional equivalence""" ds = xr.open_dataset(path, engine="netcdf4") packages = [ @@ -47,6 +47,7 @@ def check_netcdf(path, mesh=None): p = varname.rsplit("_", 2)[0] if p in packages: + l = -1 assert "modflow_input" in da.attrs if mesh is None: assert "layer" not in da.attrs @@ -54,6 +55,31 @@ def check_netcdf(path, mesh=None): lstr = varname.rsplit("_", 1)[1] if lstr[0] == "l": assert "layer" in da.attrs + l = da.attrs["layer"] - 1 + + tag = da.attrs["modflow_input"].rsplit("/", 1)[1].lower() + pobj = getattr(mobj, p) + d = getattr(pobj, tag) + if p == "ghbg_0": + spd = d.get_data() + for per in spd: + if spd[per] is not None: + istp = sum(mobj.modeltime.nstp[0:per]) + if l >= 0: + 
assert np.allclose( + ds.data_vars[varname][istp].fillna(3.00000000e30).data, + spd[per][l], + ) + else: + assert np.allclose( + ds.data_vars[varname][istp].fillna(3.00000000e30).data, + spd[per], + ) + else: + if l >= 0: + assert np.allclose(ds.data_vars[varname].values, d.get_data()[l]) + else: + assert np.allclose(ds.data_vars[varname].values, d.get_data()) def update_dataset(dataset, pobj): @@ -120,7 +146,7 @@ def test_uzf01_sim_scope_nomesh(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) @pytest.mark.regression @@ -139,7 +165,7 @@ def test_uzf01_sim_scope_mesh(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname, mesh=netcdf) + check_netcdf(ws / fname, sim.get_model(sim_name), mesh=netcdf) @pytest.mark.regression @@ -162,7 +188,7 @@ def test_uzf01_sim_scope_fname(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) @pytest.mark.regression @@ -181,7 +207,7 @@ def test_uzf02_sim_scope(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname, mesh=netcdf) + check_netcdf(ws / fname, sim.get_model(sim_name), mesh=netcdf) @pytest.mark.regression @@ -204,7 +230,7 @@ def test_uzf02_sim_scope_fname(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname, mesh=netcdf) + check_netcdf(ws / fname, sim.get_model(sim_name), mesh=netcdf) @pytest.mark.regression @@ -232,7 +258,7 @@ def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path): # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) @pytest.mark.regression @@ -261,7 +287,7 @@ def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path): # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") - check_netcdf(ws / fname, mesh=mesh) + check_netcdf(ws / fname, sim.get_model(sim_name), mesh=mesh) @pytest.mark.regression @@ -290,7 +316,7 @@ def test_uzf02_model_scope(function_tmpdir, example_data_path): # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") - check_netcdf(ws / fname, mesh=mesh) + check_netcdf(ws / fname, sim.get_model(sim_name), mesh=mesh) @pytest.mark.regression @@ -338,7 +364,7 @@ def test_uzf01_pkg_scope(function_tmpdir, example_data_path): # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) @pytest.mark.regression @@ -417,7 +443,7 @@ def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path): # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) assert ( ds["npf_k_updated"].attrs["standard_name"] == "soil_hydraulic_conductivity_at_saturation" @@ -440,15 +466,14 @@ def test_uzf01_cycle(function_tmpdir, example_data_path): sim.set_sim_path(ws) sim.write_simulation(netcdf=netcdf) - check_netcdf(ws / fname) + check_netcdf(ws / fname, sim.get_model(sim_name)) # set simulation path and rewrite base simulation sim.set_sim_path(ws / "mf6") - # gwf = sim.get_model(sim_name) - # 
gwf.name_file.nc_filerecord = None sim.write_simulation() assert not (ws / "mf6" / fname).exists() + # codegen isn't using latest modflow dev branch # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index e4416ba377..c61968ad0a 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1290,15 +1290,16 @@ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs) def dataset(self, modeltime=None, mesh=None): """ - Method to generate baseline Xarray dataset + Method to generate baseline xarray dataset Parameters ---------- - mesh + modeltime : FloPy ModelTime object + mesh : mesh type Returns ------- - Xarray dataset + xarray dataset """ raise NotImplementedError("dataset must be defined in the child class") diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 30006e0c0a..49298b9a98 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1771,6 +1771,11 @@ def get_plottable_layer_array(self, a, layer): return plotarray def dataset(self, modeltime=None, mesh=None): + """ + modeltime : FloPy ModelTime object + mesh : mesh type + valid mesh types are "layered" or None + """ from ..utils import import_optional_dependency xr = import_optional_dependency("xarray") @@ -1791,6 +1796,17 @@ def _layered_mesh_dataset(self, ds, modeltime=None): FILLNA_DBL = 9.96920996838687e36 lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + # create dataset coordinate vars + var_d = { + "time": (["time"], modeltime.totim), + } + ds = ds.assign(var_d) + ds["time"].attrs["calendar"] = "standard" + ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["axis"] = "T" + ds["time"].attrs["standard_name"] = "time" + ds["time"].attrs["long_name"] = "time" + # mesh container variable ds = ds.assign({"mesh": ([], np.int32(1))}) ds["mesh"].attrs["cf_role"] = "mesh_topology" diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index a3aadd05dc..47f0c17b36 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -601,6 +601,12 @@ def get_plottable_layer_array(self, a, layer): return plotarray def dataset(self, modeltime=None, mesh=None): + """ + modeltime : FloPy ModelTime object + mesh : mesh type + valid mesh types are "layered" or None + VertexGrid objects only support layered mesh + """ from ..utils import import_optional_dependency xr = import_optional_dependency("xarray") @@ -618,6 +624,17 @@ def dataset(self, modeltime=None, mesh=None): ds = xr.Dataset() ds.attrs["modflow_grid"] = "VERTEX" + # create dataset coordinate vars + var_d = { + "time": (["time"], modeltime.totim), + } + ds = ds.assign(var_d) + ds["time"].attrs["calendar"] = "standard" + ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["axis"] = "T" + ds["time"].attrs["standard_name"] = "time" + ds["time"].attrs["long_name"] = "time" + # mesh container variable ds = ds.assign({"mesh": ([], np.int32(1))}) ds["mesh"].attrs["cf_role"] = "mesh_topology" diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 4055a82ce8..c658508a31 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1327,7 +1327,7 @@ def write( with relative paths, leaving files defined by absolute paths fixed. netcdf : str ASCII package files will be written as configured for NetCDF input. 
- 'mesh2d' and 'structured' are supported types. + 'layered' and 'structured' are supported types. """ write_netcdf = netcdf and ( @@ -2329,7 +2329,6 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): for v in nc_info: name = nc_info[v]["attrs"]["modflow_input"].split("/")[2].lower() if mesh == None: - #name = nc_info[v]["varname"].rsplit("_", 1)[1] d = getattr(p, name) if d.repeating: for per in d.get_data(): @@ -2339,10 +2338,8 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): dataset[nc_info[v]["varname"]].values = d.get_data() elif mesh.upper() == "LAYERED": if "layer" in nc_info[v]["attrs"]: - #name = nc_info[v]["varname"].rsplit("_", 2)[1] layer = nc_info[v]["attrs"]["layer"] - 1 else: - #name = nc_info[v]["varname"].rsplit("_", 1)[1] layer = -1 d = getattr(p, name) if d.repeating: diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 22481c3374..04228426c5 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3466,7 +3466,7 @@ def plot(self, **kwargs): @staticmethod def _add_netcdf_entries( - attrs, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1 + entries, mname, pname, data_item, auxiliary=None, mesh=None, nlay=1 ): DNODATA = 3.0e30 # MF6 DNODATA constant FILLNA_INT32 = np.int32(-2147483647) # netcdf-fortran NF90_FILL_INT @@ -3480,7 +3480,7 @@ def _add_netcdf_entries( def _add_entry(tagname, iaux=None, layer=None): # netcdf variable dictionary - a = {} + e = {} # set dict key and netcdf variable name key = tagname @@ -3495,11 +3495,11 @@ def _add_entry(tagname, iaux=None, layer=None): name = f"{name}_l{layer}" # add non-attrs to dictionary - a["varname"] = name.lower() + e["varname"] = name.lower() if (data_item.type) == DatumType.integer: - a["xarray_type"] = np.int32 + e["xarray_type"] = np.int32 elif (data_item.type) == DatumType.double_precision: - a["xarray_type"] = np.float64 + e["xarray_type"] = np.float64 dims = [] if data_item.shape[0] == 'nodes': if data_item.block_name == "griddata": @@ -3531,27 +3531,30 @@ def _add_entry(tagname, iaux=None, layer=None): elif data_item.shape[0] == 'nrow': dims.append("y") - a["netcdf_shape"] = dims[::-1] + e["netcdf_shape"] = dims[::-1] # add variable attributes dictionary - a["attrs"] = {} - a["attrs"]["modflow_input"] = (f"{mname}/{pname}/{tagname}").upper() + e["attrs"] = {} + e["attrs"]["modflow_input"] = (f"{mname}/{pname}/{tagname}").upper() if iaux is not None: - a["attrs"]["modflow_iaux"] = iaux + 1 + e["attrs"]["modflow_iaux"] = iaux + 1 if layer is not None: - a["attrs"]["layer"] = layer + e["attrs"]["layer"] = layer if (data_item.type) == DatumType.integer: - a["attrs"]["_FillValue"] = FILLNA_INT32 + e["attrs"]["_FillValue"] = FILLNA_INT32 elif (data_item.type) == DatumType.double_precision: if data_item.block_name == "griddata": - a["attrs"]["_FillValue"] = FILLNA_DBL + e["attrs"]["_FillValue"] = FILLNA_DBL elif data_item.block_name == "period": - a["attrs"]["_FillValue"] = DNODATA + e["attrs"]["_FillValue"] = DNODATA if data_item.longname is not None: - a["attrs"]["longname"] = data_item.longname + if layer is not None: + e["attrs"]["longname"] = f"{data_item.longname} layer {layer}" + else: + e["attrs"]["longname"] = data_item.longname # set dictionary - attrs[key] = a + entries[key] = e if data_item.layered and mesh and mesh.upper() == "LAYERED": if data_item.name == "aux" or data_item.name == "auxvar": @@ -3572,7 +3575,7 @@ def _add_entry(tagname, iaux=None, layer=None): def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): from 
.data.mfstructure import DfnPackage, MFSimulationStructure - attrs = {} + entries = {} sim_struct = MFSimulationStructure() for package in MFPackage.__subclasses__(): @@ -3595,7 +3598,7 @@ def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): for d in block.data_structures: if block.data_structures[d].netcdf: MFPackage._add_netcdf_entries( - attrs, + entries, f"<{mtype}name>", pname, block.data_structures[d], @@ -3604,10 +3607,10 @@ def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): nlay, ) - return attrs + return entries def netcdf_info(self, mesh=None): - attrs = {} + entries = {} if self.dimensions.get_aux_variables(): auxnames = list(self.dimensions.get_aux_variables()[0]) @@ -3626,7 +3629,7 @@ def netcdf_info(self, mesh=None): ): if dataset.structure.netcdf and dataset.has_data(): MFPackage._add_netcdf_entries( - attrs, + entries, self.model_name, self.package_name, dataset.structure, @@ -3635,7 +3638,7 @@ def netcdf_info(self, mesh=None): self.model_or_sim.modelgrid.nlay, ) - return attrs + return entries def update_dataset(self, dataset, netcdf_info=None, mesh=None): from ..discretization.vertexgrid import VertexGrid @@ -3684,7 +3687,7 @@ def _data_shape(shape): return dataset def _set_netcdf_storage(self, reset=False): - """Set griddata array dataset storage to netcdf. + """set array dataset storage to netcdf. Parameters ---------- diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index f7f8c5bc58..0840a0e6a6 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -1675,7 +1675,7 @@ def write_simulation( Writes out the simulation in silent mode (verbosity_level = 0) netcdf : str ASCII package files will be written as configured for NetCDF input. - 'mesh2d' and 'structured' are supported types. + 'layered' and 'structured' are supported types. 
""" sim_data = self.simulation_data diff --git a/flopy/mf6/utils/codegen/__init__.py b/flopy/mf6/utils/codegen/__init__.py index e70f86b5e1..762fcdba83 100644 --- a/flopy/mf6/utils/codegen/__init__.py +++ b/flopy/mf6/utils/codegen/__init__.py @@ -23,6 +23,7 @@ def _get_template_env(developmode: bool = True): env.filters["base"] = filters.base env.filters["title"] = filters.title env.filters["description"] = filters.description + env.filters["longname"] = filters.longname env.filters["prefix"] = filters.prefix env.filters["parent"] = filters.parent env.filters["skip_init"] = filters.skip_init diff --git a/flopy/mf6/utils/codegen/filters.py b/flopy/mf6/utils/codegen/filters.py index e63fb8d117..d0737543cf 100644 --- a/flopy/mf6/utils/codegen/filters.py +++ b/flopy/mf6/utils/codegen/filters.py @@ -102,6 +102,12 @@ def dfn_file_name(component_name: tuple[str, str]) -> str: return f"gwt-{component_name[1]}.dfn" return f"{component_name[0] or 'sim'}-{component_name[1]}.dfn" +def longname(var: dict) -> Any: + _longname = var.get("longname", None) + if _longname is not None: + return _longname + return None + def parent(dfn: dict, component_name: tuple[str, str]) -> str: # TODO should be no longer needed when parents are explicit in dfns """The input context's parent context type, if it can have a parent.""" From aa021fa6d8c36c6ce4ee32566b3eeb320232858b Mon Sep 17 00:00:00 2001 From: mjreno Date: Wed, 20 Aug 2025 14:53:49 -0400 Subject: [PATCH 23/44] move data update to package --- .docs/Notebooks/mf6_netcdf01_tutorial.py | 80 ++++++++--------- autotest/regression/test_model_netcdf.py | 39 +------- flopy/mf6/data/mfdataarray.py | 27 +++--- flopy/mf6/data/mfdatastorage.py | 30 +++++-- flopy/mf6/mfmodel.py | 48 +++------- flopy/mf6/mfpackage.py | 109 ++++++++++++++++++++++- 6 files changed, 194 insertions(+), 139 deletions(-) diff --git a/.docs/Notebooks/mf6_netcdf01_tutorial.py b/.docs/Notebooks/mf6_netcdf01_tutorial.py index 30a8bc1f29..69067f0a18 100644 --- a/.docs/Notebooks/mf6_netcdf01_tutorial.py +++ b/.docs/Notebooks/mf6_netcdf01_tutorial.py @@ -127,6 +127,12 @@ # write simulation with structured NetCDF file sim.write_simulation(netcdf="structured") +# ## Run MODFLOW 6 simulation with NetCDF input +# +# The simulation generated by this tutorial should be runnable by +# Extended MODFLOW 6, available from the nightly-build repository +# (linked above). + # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) @@ -140,6 +146,12 @@ # write simulation with with layered mesh NetCDF file sim.write_simulation(netcdf="layered") +# ## Run MODFLOW 6 simulation with NetCDF input +# +# The simulation generated by this tutorial should be runnable by +# Extended MODFLOW 6, available from the nightly-build repository +# (linked above). + # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) @@ -225,23 +237,6 @@ dis = gwf.get_package("dis") ds = dis.update_dataset(ds) -# ## Update array data -# -# We have created dataset array variables for the package but they do not yet -# define the expected input data for MODFLOW 6. We will take advantage of the -# existing simulation objects and update the dataset. -# -# Default dataset variable names are defined in the package `netcdf_info()` -# dictionary. Here we will use the info dictionary to programmatically update -# the dataset- for remaining packages we will hardcode the variable names -# being updated for maximum clarity. 
-
-nc_info = dis.netcdf_info()
-for v in nc_info:
-    name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower()
-    d = getattr(dis, name)
-    ds[nc_info[v]["varname"]].values = d.get_data()
-
 # ## Access `NPF` package NetCDF attributes
 #
 # Access package scoped NetCDF details by storing the dictionary returned
@@ -270,12 +265,6 @@
 nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation"
 ds = npf.update_dataset(ds, netcdf_info=nc_info)
 
-# ## Update array data
-
-# update dataset from npf arrays
-ds["npf_icelltype"].values = npf.icelltype.get_data()
-ds["npf_k_updated"].values = npf.k.get_data()
-
 # ## Show dataset `NPF K` parameter with updates
 
 # print dataset npf k variable
@@ -286,16 +275,12 @@
 # ic
 ic = gwf.get_package("ic")
 ds = ic.update_dataset(ds)
-ds["ic_strt"].values = ic.strt.get_data()
 
 # ## Update the dataset with supported `STO` arrays
 
 # storage
 sto = gwf.get_package("sto")
 ds = sto.update_dataset(ds)
-ds["sto_iconvert"].values = sto.iconvert.get_data()
-ds["sto_sy"].values = sto.sy.get_data()
-ds["sto_ss"].values = sto.ss.get_data()
 
 # ## Update the dataset with supported `GHBG` arrays
 
@@ -303,20 +288,6 @@
 ghbg = gwf.get_package("ghbg_0")
 ds = ghbg.update_dataset(ds)
 
-# ## Update array data
-
-# update bhead netcdf array from flopy perioddata
-# timeseries step index is first of stress period
-for p in ghbg.bhead.get_data():
-    istp = sum(gwf.modeltime.nstp[0:p])
-    ds["ghbg_0_bhead"].values[istp] = ghbg.bhead.get_data()[p]
-
-# update cond netcdf array from flopy perioddata
-# timeseries step index is first of stress period
-for p in ghbg.cond.get_data():
-    istp = sum(gwf.modeltime.nstp[0:p])
-    ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p]
-
 # ## Display generated dataset
 
 # show the dataset
@@ -337,3 +308,30 @@
 
 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)
+
+# ## Method 4: DIY with xarray
+#
+# The above method still uses FloPy objects to update the dataset arrays
+# to values consistent with the state of the objects. The `netcdf_info`
+# dictionary is intended to support creation of the dataset without
+# an existing simulation defined. The base dataset can be defined with
+# `modelgrid` and `modeltime` objects, while the full package netcdf
+# dictionary can be retrieved with a static call to a model or package mf6
+# type. The auxiliary input is optional but does show the variables that
+# would be required if auxiliary variables were defined in the package. 
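+#
+# As a rough sketch only, the entries returned by the static calls
+# demonstrated below can also be turned into dataset variables by hand.
+# The `base`, `dimmap`, and `entry` names are illustrative, and the sketch
+# assumes the `gwf` model and the `flopy`/`np` imports from earlier in
+# this tutorial:
+
+base = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
+dimmap = {
+    "time": sum(gwf.modeltime.nstp),
+    "z": gwf.modelgrid.nlay,
+    "y": gwf.modelgrid.nrow,
+    "x": gwf.modelgrid.ncol,
+}
+for entry in flopy.mf6.mfpackage.MFPackage.netcdf_package(
+    mtype="GWF", ptype="NPF"
+).values():
+    # size each variable from its declared dims; initialize to the fill value
+    shape = [dimmap[d] for d in entry["netcdf_shape"]]
+    data = np.full(shape, entry["attrs"]["_FillValue"], dtype=entry["xarray_type"])
+    base = base.assign({entry["varname"]: (entry["netcdf_shape"], data)})
+    base[entry["varname"]].attrs.update(entry["attrs"])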
+ +# ## Demonstrate static call on MFPackage (structured dataset): + +netcdf_info = flopy.mf6.mfpackage.MFPackage.netcdf_package( + mtype="GWF", + ptype="NPF", + auxiliary=["CONCENTRATION"], +) +pprint(netcdf_info) + +# ## Demonstrate static call on MFPackage (layered dataset): + +netcdf_info = flopy.mf6.mfpackage.MFPackage.netcdf_package( + mtype="GWF", ptype="NPF", mesh="LAYERED", auxiliary=["CONCENTRATION"], nlay=2 +) +pprint(netcdf_info) diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py index d84a7a1b7a..6539ff7a34 100644 --- a/autotest/regression/test_model_netcdf.py +++ b/autotest/regression/test_model_netcdf.py @@ -49,6 +49,8 @@ def check_netcdf(path, mobj, mesh=None): if p in packages: l = -1 assert "modflow_input" in da.attrs + assert "longname" in da.attrs + assert da.attrs["longname"] != "" if mesh is None: assert "layer" not in da.attrs else: @@ -82,14 +84,6 @@ def check_netcdf(path, mobj, mesh=None): assert np.allclose(ds.data_vars[varname].values, d.get_data()) -def update_dataset(dataset, pobj): - nc_info = pobj.netcdf_info() - for v in nc_info: - name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower() - d = getattr(pobj, name) - dataset[nc_info[v]["varname"]].values = d.get_data() - - @pytest.mark.regression def test_uzf01_model_scope_nofile(function_tmpdir, example_data_path): sim_name = "uzf01" @@ -350,16 +344,6 @@ def test_uzf01_pkg_scope(function_tmpdir, example_data_path): # add all packages and update data for p in gwf.packagelist: ds = p.update_dataset(ds) - nc_info = p.netcdf_info() - for v in nc_info: - name = nc_info[v]["attrs"]["modflow_input"].rsplit("/", 1)[1].lower() - d = getattr(p, name) - if d.repeating: - for per in d.get_data(): - istp = sum(gwf.modeltime.nstp[0:per]) - ds[nc_info[v]["varname"]].values[istp] = d.get_data()[per] - else: - ds[nc_info[v]["varname"]].values = d.get_data() # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") @@ -398,7 +382,6 @@ def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path): # update dataset with `DIS` arrays dis = gwf.get_package("dis") ds = dis.update_dataset(ds) - update_dataset(ds, dis) # get npf package netcdf info npf = gwf.get_package("npf") @@ -410,36 +393,18 @@ def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path): nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" ds = npf.update_dataset(ds, netcdf_info=nc_info) - # update dataset from npf arrays - ds["npf_icelltype"].values = npf.icelltype.get_data() - ds["npf_k_updated"].values = npf.k.get_data() - # ic ic = gwf.get_package("ic") ds = ic.update_dataset(ds) - update_dataset(ds, ic) # storage sto = gwf.get_package("sto") ds = sto.update_dataset(ds) - update_dataset(ds, sto) # update dataset with 'GHBG' arrays ghbg = gwf.get_package("ghbg_0") ds = ghbg.update_dataset(ds) - # update bhead netcdf array from flopy perioddata - # timeseries step index is first of stress period - for p in ghbg.bhead.get_data(): - istp = sum(gwf.modeltime.nstp[0:p]) - ds["ghbg_0_bhead"].values[istp] = ghbg.bhead.get_data()[p] - - # update cond netcdf array from flopy perioddata - # timeseries step index is first of stress period - for p in ghbg.cond.get_data(): - istp = sum(gwf.modeltime.nstp[0:p]) - ds["ghbg_0_cond"].values[istp] = ghbg.cond.get_data()[p] - # write dataset to netcdf ds.to_netcdf(ws / fname, format="NETCDF4", engine="netcdf4") diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index 
412a2c4c0c..63642bbc1e 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -1242,23 +1242,26 @@ def _get_file_entry( layer_min = layer layer_max = shape_ml.inc_shape_idx(layer) - if data_storage.netcdf: - file_entry_array.append(f"{indent}{self.structure.name}{indent}NETCDF\n") - - elif layered_aux: + if layered_aux: aux_var_names = ( self.data_dimensions.package_dim.get_aux_variables()[0] ) for layer in range(0, len(aux_var_names)-1): - file_entry_array.append( - self._get_file_entry_layer( - [layer], - data_indent, - data_storage.layer_storage[layer].data_storage_type, - ext_file_action, - layered_aux, + if data_storage.netcdf: + if data_storage.has_data(): + file_entry_array.append(f"{indent}{aux_var_names[layer+1]}{indent}NETCDF\n") + else: + file_entry_array.append( + self._get_file_entry_layer( + [layer], + data_indent, + data_storage.layer_storage[layer].data_storage_type, + ext_file_action, + layered_aux, + ) ) - ) + elif data_storage.netcdf: + file_entry_array.append(f"{indent}{self.structure.name}{indent}NETCDF\n") else: for layer in shape_ml.indexes(layer_min, layer_max): diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 17b59007e9..0d858bd96e 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2371,8 +2371,15 @@ def _build_full_data(self, apply_multiplier=False): else: fill_value = None full_data = np.full(dimensions, fill_value, np_full_data_type) - is_aux = self.data_dimensions.structure.name == "aux" - if is_aux: + layer_aux = ( + self.data_dimensions.structure.name == "aux" + and not self.layered + ) + grid_aux = ( + self.data_dimensions.structure.name == "aux" + and self.layered + ) + if layer_aux: aux_data = [] if not self.layered: layers_to_process = [0] @@ -2398,7 +2405,7 @@ def _build_full_data(self, apply_multiplier=False): or len(self.layer_storage[layer].internal_data) > 0 and self.layer_storage[layer].internal_data[0] is None ): - if is_aux: + if layer_aux: full_data = None else: return None @@ -2409,9 +2416,14 @@ def _build_full_data(self, apply_multiplier=False): ): full_data = self.layer_storage[layer].internal_data * mult else: - full_data[layer] = ( - self.layer_storage[layer].internal_data * mult - ) + if grid_aux: + full_data = ( + self.layer_storage[layer].internal_data * mult + ) + else: + full_data[layer] = ( + self.layer_storage[layer].internal_data * mult + ) elif ( self.layer_storage[layer].data_storage_type == DataStorageType.internal_constant @@ -2469,11 +2481,11 @@ def _build_full_data(self, apply_multiplier=False): ): full_data = data_out else: - if is_aux and full_data.shape == data_out.shape: + if layer_aux and full_data.shape == data_out.shape: full_data = data_out else: full_data[layer] = data_out - if is_aux: + if layer_aux: if full_data is not None: all_none = False aux_data.append(full_data) @@ -2482,7 +2494,7 @@ def _build_full_data(self, apply_multiplier=False): np.nan, self.data_dimensions.structure.get_datum_type(True), ) - if is_aux: + if layer_aux: if all_none: return None else: diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index c658508a31..af7e13f8d3 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -936,13 +936,10 @@ def load_base( nc_filerecord = instance.name_file.nc_filerecord.get_data() if nc_filerecord: message = ( - "NetCDF input file is currently " - "unsupported for model load." 
- ) - raise MFDataException( - model=modelname, - message=message, + "NetCDF input file is currently unsupported " + f"for model load ({modelname})." ) + raise FlopyException(message) # order packages # FIX: Transport - Priority packages maybe should not be hard coded @@ -1316,6 +1313,7 @@ def write( ext_file_action=ExtFileAction.copy_relative_paths, netcdf=None, ): + from ..version import __version__ """ Writes out model's package files. @@ -1386,7 +1384,10 @@ def write( mesh=mesh, ) - ds = self.update_dataset(ds, mesh=mesh) + nc_info = self.netcdf_info(mesh=mesh) + nc_info["attrs"]["title"] = f"{self.name.upper()} input" + nc_info["attrs"]["source"] = f"flopy {__version__}" + ds = self.update_dataset(ds, netcdf_info=nc_info, mesh=mesh) # write dataset to netcdf fname = self.name_file.nc_filerecord.get_data()[0][0] @@ -2310,7 +2311,7 @@ def netcdf_info(self, mesh=None): self.name, self.model_type, self.get_grid_type(), mesh ) - def update_dataset(self, dataset, netcdf_info=None, mesh=None): + def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True): if netcdf_info is None: nc_info = self.netcdf_info(mesh=mesh) else: @@ -2322,35 +2323,6 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): # add all packages and update data for p in self.packagelist: # add package var to dataset - dataset = p.update_dataset(dataset, mesh=mesh) - - # update dataset var values - nc_info = p.netcdf_info(mesh=mesh) - for v in nc_info: - name = nc_info[v]["attrs"]["modflow_input"].split("/")[2].lower() - if mesh == None: - d = getattr(p, name) - if d.repeating: - for per in d.get_data(): - istp = sum(self.modeltime.nstp[0:per]) - dataset[nc_info[v]["varname"]].values[istp] = d.get_data()[per] - else: - dataset[nc_info[v]["varname"]].values = d.get_data() - elif mesh.upper() == "LAYERED": - if "layer" in nc_info[v]["attrs"]: - layer = nc_info[v]["attrs"]["layer"] - 1 - else: - layer = -1 - d = getattr(p, name) - if d.repeating: - for per in d.get_data(): - if d.get_data()[per] is not None: - istp = sum(self.modeltime.nstp[0:per]) - dataset[nc_info[v]["varname"]].values[istp] = d.get_data()[per][layer] - else: - if layer >= 0: - dataset[nc_info[v]["varname"]].values = d.get_data()[layer].flatten() - else: - dataset[nc_info[v]["varname"]].values = d.get_data().flatten() + dataset = p.update_dataset(dataset, mesh=mesh, update_data=update_data) return dataset diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 04228426c5..da9c3d604d 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3512,6 +3512,14 @@ def _add_entry(tagname, iaux=None, layer=None): dims += ["x", "y", "z", "time"] elif mesh.upper() == "LAYERED": dims += ["nmesh_face", "time"] + elif ( + data_item.block_name == "period" and + 'ncpl' in data_item.shape + ): + if mesh is None: + dims += ["x", "y", "time"] + elif mesh.upper() == "LAYERED": + dims += ["nmesh_face", "time"] else: if mesh is None: dimmap = {"nlay": "z", "nrow": "y", "ncol": "x"} @@ -3584,7 +3592,7 @@ def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): c, sc = p.dfn_file_name.split(".")[0].split("-") if c == mtype.lower() and sc == ptype.lower(): sim_struct.add_package(p, model_file=False) - exit + break if ptype.lower() in sim_struct.package_struct_objs: pso = sim_struct.package_struct_objs[ptype.lower()] @@ -3640,7 +3648,7 @@ def netcdf_info(self, mesh=None): return entries - def update_dataset(self, dataset, netcdf_info=None, mesh=None): + def update_dataset(self, dataset, netcdf_info=None, mesh=None, 
update_data=True): from ..discretization.vertexgrid import VertexGrid if netcdf_info is None: nc_info = self.netcdf_info(mesh=mesh) @@ -3655,6 +3663,7 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): "time": sum(modeltime.nstp), "z": modelgrid.nlay, "nmesh_face": modelgrid.ncpl, + "ncpl": modelgrid.ncpl, } else: dimmap = { @@ -3663,8 +3672,89 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None): "y": modelgrid.nrow, "x": modelgrid.ncol, "nmesh_face": modelgrid.ncpl, + "ncpl": modelgrid.ncpl, } + def _update_data(nc_info, key, pobj=None, data=None): + if "modflow_iaux" in nc_info[key]["attrs"]: + iaux = nc_info[key]["attrs"]["modflow_iaux"] - 1 + else: + iaux = -1 + if mesh == None: + if pobj.repeating: + if iaux >= 0: + for k in pobj._data_storage.keys(): + istp = sum(modeltime.nstp[0:k]) + pobj.get_data_prep(k) + auxdata = pobj._data_storage[k].get_data(iaux) + dataset[nc_info[key]["varname"]].values[istp, :] = ( + auxdata) + else: + for per in data: + if data[per] is None: + continue + istp = sum(modeltime.nstp[0:per]) + if ( + pobj.structure.data_item_structures[0].numeric_index or + pobj.structure.data_item_structures[0].is_cellid): + dataset[nc_info[key]["varname"]].values[istp, :] = ( + data[per] + 1) + else: + dataset[nc_info[key]["varname"]].values[istp, :] = ( + data[per]) + else: + dataset[nc_info[key]["varname"]].values = data + elif mesh.upper() == "LAYERED": + if "layer" in nc_info[key]["attrs"]: + layer = nc_info[key]["attrs"]["layer"] - 1 + else: + layer = -1 + if pobj.repeating: + if iaux >= 0: + for k in pobj._data_storage.keys(): + istp = sum(modeltime.nstp[0:k]) + pobj.get_data_prep(k) + auxdata = pobj._data_storage[k].get_data(iaux) + if self.structure.read_as_arrays: + uidx = istp + auxdata.size + dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + auxdata.flatten()) + elif self.structure.read_array_grid: + uidx = istp + auxdata[layer].size + dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + auxdata[layer].flatten()) + else: + for per in data: + if data[per] is None: + continue + istp = sum(modeltime.nstp[0:per]) + if layer >= 0 and ( + len(pobj.structure.shape) == 3 or + pobj.structure.shape[0] == 'nodes'): + uidx = istp + data[per][layer].size + dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + data[per][layer].flatten()) + else: + uidx = istp + data[per].size + if ( + pobj.structure.data_item_structures[0].numeric_index or + pobj.structure.data_item_structures[0].is_cellid): + dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + data[per].flatten() + 1) + else: + dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + data[per].flatten()) + else: + if layer >= 0 and ( + 'nlay' in pobj.structure.shape or + pobj.structure.shape[0] == 'nodes'): + dataset[nc_info[key]["varname"]].values = ( + data[layer].flatten()) + else: + dataset[nc_info[key]["varname"]].values = ( + data.flatten()) + + def _data_shape(shape): dims_l = [] for d in shape: @@ -3672,6 +3762,9 @@ def _data_shape(shape): return dims_l + last_path = '' + pkg = None + pdata = None for v in nc_info: varname = nc_info[v]["varname"] data = np.full( @@ -3684,6 +3777,18 @@ def _data_shape(shape): for a in nc_info[v]["attrs"]: dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + if update_data: + path = nc_info[v]["attrs"]["modflow_input"] + tag = path.split("/")[2].lower() + if path != last_path: + pkg = None + pdata = None + pkg = getattr(self, tag) + if tag != "aux": + pdata = pkg.get_data() + last_path = path + _update_data(nc_info, v, pkg, pdata) + 
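+        # at this point every nc_info entry has a dataset variable sized from
+        # dimmap and initialized to its declared _FillValue; when update_data
+        # is True the variables also carry current package data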
return dataset def _set_netcdf_storage(self, reset=False): From 2e67740f31aac45fa904f6d289f12a1329a1c997 Mon Sep 17 00:00:00 2001 From: mjreno Date: Wed, 20 Aug 2025 16:13:58 -0400 Subject: [PATCH 24/44] fix structure attribute --- flopy/mf6/data/mfdatastorage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 0d858bd96e..d806bb98df 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2373,11 +2373,11 @@ def _build_full_data(self, apply_multiplier=False): full_data = np.full(dimensions, fill_value, np_full_data_type) layer_aux = ( self.data_dimensions.structure.name == "aux" - and not self.layered + and not self.data_dimensions.structure.layered ) grid_aux = ( self.data_dimensions.structure.name == "aux" - and self.layered + and self.data_dimensions.structure.layered ) if layer_aux: aux_data = [] From d3c98297669f5421dab8dbf6e9e87a8de33784ac Mon Sep 17 00:00:00 2001 From: mjreno Date: Tue, 26 Aug 2025 16:55:20 -0400 Subject: [PATCH 25/44] start mf6 grid array test --- autotest/test_mf6.py | 468 ++++++++++++++++++++++++++++++++ flopy/mf6/data/mfdataarray.py | 45 ++- flopy/mf6/data/mfdatastorage.py | 37 ++- flopy/mf6/mfmodel.py | 11 +- flopy/mf6/mfpackage.py | 94 +++---- flopy/mf6/mfsimbase.py | 2 +- 6 files changed, 592 insertions(+), 65 deletions(-) diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py index fc879eff78..4efffef3b3 100644 --- a/autotest/test_mf6.py +++ b/autotest/test_mf6.py @@ -39,6 +39,7 @@ ModflowGwfsto, ModflowGwfuzf, ModflowGwfwel, + ModflowGwfwelg, ModflowGwtadv, ModflowGwtdis, ModflowGwtic, @@ -1961,6 +1962,473 @@ def test_array(function_tmpdir): assert lak_tab_2[4][1] == 503000.0 +@requires_exe("mf6") +def test_grid_array(function_tmpdir): + # get_data + # empty data in period block vs data repeating + # array + # aux values, test that they work the same as other arrays (is a value + # of zero always used even if aux is defined in a previous stress + # period?) 
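+    #
+    # grid-based (array) stress packages such as WELG fill their period
+    # arrays with DNODATA (3.0e30) to flag cells that have no boundary in
+    # a given stress period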
+ + sim_name = "test_grid_array" + model_name = "test_grid_array" + out_dir = function_tmpdir + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir) + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] + tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) + ims_package = ModflowIms( + sim, + pname="my_ims_file", + filename=f"{sim_name}.ims", + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.0001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.0001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam") + + dis = ModflowGwfdis( + model, + length_units="FEET", + nlay=4, + nrow=2, + ncol=2, + delr=5000.0, + delc=5000.0, + top=100.0, + botm=[50.0, 0.0, -50.0, -100.0], + filename=f"{model_name} 1.dis", + ) + ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic") + npf_package = ModflowGwfnpf( + model, + pname="npf_1", + save_flows=True, + alternative_cell_averaging="logarithmic", + icelltype=1, + k=50.0, + ) + + oc_package = ModflowGwfoc( + model, + budget_filerecord=[("test_array.cbc",)], + head_filerecord=[("test_array.hds",)], + saverecord={ + 0: [("HEAD", "ALL"), ("BUDGET", "ALL")], + 1: [], + }, + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]} + irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]} + rcha = ModflowGwfrcha( + model, + print_input=True, + print_flows=True, + auxiliary=[("var1", "var2")], + irch=irch, + recharge={1: 0.0001, 2: 0.00001}, + aux=aux, + ) + val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) + assert val_irch[0] == 4 + assert val_irch[1] == 5 + assert val_irch[2] == 6 + assert val_irch[3] == 6 + val_irch_2 = rcha.irch.get_data() + assert val_irch_2[0] is None + assert val_irch_2[1][1, 1] == 1 + assert val_irch_2[2][1, 1] == 3 + assert val_irch_2[3] is None + val_irch_2_3 = rcha.irch.get_data(3) + assert val_irch_2_3 is None + val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) + assert val_rch[0] == 0.0 + assert val_rch[1] == 0.0004 + assert val_rch[2] == 0.00004 + assert val_rch[3] == 0.00004 + val_rch_2 = rcha.recharge.get_data() + assert val_rch_2[0] is None + assert val_rch_2[1][0, 0] == 0.0001 + assert val_rch_2[2][0, 0] == 0.00001 + assert val_rch_2[3] is None + aux_data_0 = rcha.aux.get_data(0) + assert aux_data_0 is None + aux_data_1 = rcha.aux.get_data(1) + assert aux_data_1[0][0][0] == 50.0 + aux_data_2 = rcha.aux.get_data(2) + assert aux_data_2 is None + aux_data_3 = rcha.aux.get_data(3) + assert aux_data_3[0][0][0] == 200.0 + + nlay = dis.nlay.get_data() + nrow = dis.nrow.get_data() + ncol = dis.ncol.get_data() + + DNODATA = 3.0e30 # MF6 DNODATA constant + welqspd = {} + welconcspd = {} + for n in range(4): + q = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + if n == 1: + q[0, 0, 0] = 0.25 + welconc[0, 0, 0] = 0.0 + welaux2[0, 0, 0] = 9.0 + elif n == 2: + q[0, 0, 0] = 0.1 + welconc[0, 0, 0] = 9.0 + welaux2[0, 0, 0] = 0.0 + welqspd[n] = q + welconcspd[n] = [welconc, welaux2] + + # first create test package with multiple auxvars + wel = ModflowGwfwelg( + model, + print_input=True, + print_flows=True, + mover=True, + save_flows=False, + auxiliary=["var1", 
"var2"], + pname="WEL-1", + q=welqspd, + aux=welconcspd, + ) + + assert len(wel.q.array) == 4 + assert len(wel.q.get_data()) == 4 + assert len(wel.aux.array) == 4 + assert len(wel.aux.get_data()) == 4 + assert np.allclose(wel.aux.array[0][0], wel.aux.get_data(0)[0]) + assert np.allclose(wel.aux.array[0][1], wel.aux.get_data(0)[1]) + assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0]) + assert np.allclose(wel.aux.array[1][1], wel.aux.get_data(1)[1]) + assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0]) + assert np.allclose(wel.aux.array[2][1], wel.aux.get_data(2)[1]) + assert np.allclose(wel.aux.array[3][0], wel.aux.get_data(3)[0]) + assert np.allclose(wel.aux.array[3][1], wel.aux.get_data(3)[1]) + assert np.allclose(wel.aux.array[0][0], wel.aux.get_data()[0][0]) + assert np.allclose(wel.aux.array[0][1], wel.aux.get_data()[0][1]) + assert np.allclose(wel.aux.array[1][0], wel.aux.get_data()[1][0]) + assert np.allclose(wel.aux.array[1][1], wel.aux.get_data()[1][1]) + assert np.allclose(wel.aux.array[2][0], wel.aux.get_data()[2][0]) + assert np.allclose(wel.aux.array[2][1], wel.aux.get_data()[2][1]) + assert np.allclose(wel.aux.array[3][0], wel.aux.get_data()[3][0]) + assert np.allclose(wel.aux.array[3][1], wel.aux.get_data()[3][1]) + # assert wel.q.get_data()[0] is None + # assert wel.q.get_data(0) is None + # assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1)) + # assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2)) + assert len(wel.q.array) == 4 + # assert np.allclose(wel.q.array[1], wel.q.get_data(1)) + # assert np.allclose(wel.q.array[2], wel.q.get_data(2)) + # assert wel.q.get_data()[3] is None + # assert wel.q.get_data(3) is None + + assert not wel.has_stress_period_data + q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array) + val_q = np.nansum(q_nan, axis=(1, 2, 3, 4)) + assert val_q[0] == 0.0 + assert val_q[1] == 0.25 + assert val_q[2] == 0.1 + assert val_q[3] == 0.0 + val_q_2 = wel.q.get_data() + assert np.all(val_q_2[0] == DNODATA) + assert val_q_2[1][0, 0, 0] == 0.25 + assert val_q_2[2][0, 0, 0] == 0.1 + assert np.all(val_q_2[3] == DNODATA) + aux_data_0 = wel.aux.get_data(0) + assert np.all(aux_data_0[0] == DNODATA) + aux_data_1 = wel.aux.get_data(1) + assert aux_data_1[0][0][0][0] == 0.0 + assert aux_data_1[1][0][0][0] == 9.0 + aux_data_2 = wel.aux.get_data(2) + assert aux_data_2[0][0][0][0] == 9.0 + assert aux_data_2[1][0][0][0] == 0.0 + aux_data_3 = wel.aux.get_data(3) + assert np.all(aux_data_3[0] == DNODATA) + # assert wel.q[0] is None + # assert wel.q[1[0][1] == 0.25 + + # remove test wel package + wel.remove() + + welqspd = {} + welconcspd = {} + for n in range(2): + q = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + if n == 0: + q[0, 0, 0] = 0.25 + welconc[0, 0, 0] = 0.0 + elif n == 1: + q[0, 0, 0] = 0.1 + welconc[0, 0, 0] = 0.0 + welqspd[n + 1] = q + welconcspd[n + 1] = [welconc] + + # create welg package + wel = ModflowGwfwelg( + model, + print_input=True, + print_flows=True, + mover=True, + save_flows=False, + auxiliary=["CONCENTRATION"], + pname="WEL-1", + q=welqspd, + aux=welconcspd, + ) + + assert len(wel.q.array) == 4 + assert len(wel.q.get_data()) == 4 + assert len(wel.aux.array) == 4 + assert len(wel.aux.get_data()) == 4 + assert wel.q.get_data()[0] is None + assert wel.q.get_data(0) is None + wel_q_array = wel.q.array + assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1)) + assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2)) + assert 
np.allclose(wel.q.array[1], wel.q.get_data(1)) + assert np.allclose(wel.q.array[2], wel.q.get_data(2)) + assert wel.q.get_data()[3] is None + assert wel.q.get_data(3) is None + assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0]) + assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0]) + assert not wel.has_stress_period_data + q_nan = np.where(wel_q_array == DNODATA, np.nan, wel_q_array) + val_q = np.nansum(q_nan, axis=(1, 2, 3, 4)) + assert val_q[0] == 0.0 + assert val_q[1] == 0.25 + assert val_q[2] == 0.1 + assert val_q[3] == 0.1 + val_q_2 = wel.q.get_data() + assert val_q_2[0] is None + assert val_q_2[1][0, 0, 0] == 0.25 + assert val_q_2[2][0, 0, 0] == 0.1 + assert val_q_2[3] is None + aux_data_0 = wel.aux.get_data(0) + assert aux_data_0 is None + aux_data_1 = wel.aux.get_data(1) + assert aux_data_1[0][0][0][0] == 0.0 + assert aux_data_1[0][0, 0, 0] == 0.0 + aux_data_2 = wel.aux.get_data(2) + assert aux_data_2[0][0, 0, 0] == 0.0 + aux_data_3 = wel.aux.get_data(3) + assert aux_data_3 is None + + drnspdict = { + 0: [[(0, 0, 0), 60.0, 10.0]], + 2: [], + 3: [[(0, 0, 0), 55.0, 5.0]], + } + drn = ModflowGwfdrn( + model, + print_input=True, + print_flows=True, + stress_period_data=drnspdict, + save_flows=False, + pname="DRN-1", + ) + drn_array = drn.stress_period_data.array + assert drn_array[0][0][1] == 60.0 + assert drn_array[1][0][1] == 60.0 + assert drn_array[2] is None + assert drn_array[3][0][1] == 55.0 + drn_gd_0 = drn.stress_period_data.get_data(0) + assert drn_gd_0[0][1] == 60.0 + drn_gd_1 = drn.stress_period_data.get_data(1) + assert drn_gd_1 is None + drn_gd_2 = drn.stress_period_data.get_data(2) + assert len(drn_gd_2) == 0 + drn_gd_3 = drn.stress_period_data.get_data(3) + assert drn_gd_3[0][1] == 55.0 + + ghbspdict = { + 0: [[(0, 1, 1), 60.0, 10.0]], + } + ghb = ModflowGwfghb( + model, + print_input=True, + print_flows=True, + stress_period_data=ghbspdict, + save_flows=False, + pname="GHB-1", + ) + + lakpd = [(0, 70.0, 1), (1, 65.0, 1)] + lakecn = [ + (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + ] + lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")] + lak = ModflowGwflak( + model, + pname="lak", + print_input=True, + mover=True, + nlakes=2, + noutlets=0, + ntables=1, + packagedata=lakpd, + connectiondata=lakecn, + tables=lak_tables, + ) + + table_01 = [ + (30.0, 100000.0, 10000.0), + (40.0, 200500.0, 10100.0), + (50.0, 301200.0, 10130.0), + (60.0, 402000.0, 10180.0), + (70.0, 503000.0, 10200.0), + (80.0, 700000.0, 20000.0), + ] + lak_tab = ModflowUtllaktab( + model, + filename="lak01.tab", + nrow=6, + ncol=3, + table=table_01, + ) + + table_02 = [ + (40.0, 100000.0, 10000.0), + (50.0, 200500.0, 10100.0), + (60.0, 301200.0, 10130.0), + (70.0, 402000.0, 10180.0), + (80.0, 503000.0, 10200.0), + (90.0, 700000.0, 20000.0), + ] + lak_tab_2 = ModflowUtllaktab( + model, + filename="lak02.tab", + nrow=6, + ncol=3, + table=table_02, + ) + wel_name_1 = wel.name[0] + lak_name_2 = lak.name[0] + package_data = [(wel_name_1,), (lak_name_2,)] + period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)] + fname = f"{model.name}.input.mvr" + mvr = ModflowGwfmvr( + parent_model_or_package=model, + filename=fname, + print_input=True, + print_flows=True, + maxpackages=2, + maxmvr=1, + packages=package_data, + perioddata=period_data, + ) + + # test writing and loading model + print(wel.aux.array) + sim.write_simulation() + print(wel.aux.array) + sim.run_simulation() + print(wel.aux.array) + + test_sim 
= MFSimulation.load( + sim_name, + "mf6", + "mf6", + out_dir, + write_headers=False, + ) + model = test_sim.get_model() + dis = model.get_package("dis") + rcha = model.get_package("rcha") + wel = model.get_package("wel") + drn = model.get_package("drn") + lak = model.get_package("lak") + lak_tab = model.get_package("laktab") + assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis" + # do same tests as above + val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) + assert val_irch[0] == 4 + assert val_irch[1] == 5 + assert val_irch[2] == 6 + assert val_irch[3] == 6 + val_irch_2 = rcha.irch.get_data() + assert val_irch_2[0] is None + assert val_irch_2[1][1, 1] == 1 + assert val_irch_2[2][1, 1] == 3 + assert val_irch_2[3] is None + val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) + assert val_rch[0] == 0.0 + assert val_rch[1] == 0.0004 + assert val_rch[2] == 0.00004 + assert val_rch[3] == 0.00004 + val_rch_2 = rcha.recharge.get_data() + assert val_rch_2[0] is None + assert val_rch_2[1][0, 0] == 0.0001 + assert val_rch_2[2][0, 0] == 0.00001 + assert val_rch_2[3] is None + aux_data_0 = rcha.aux.get_data(0) + assert aux_data_0 is None + aux_data_1 = rcha.aux.get_data(1) + assert aux_data_1[0][0][0] == 50.0 + aux_data_2 = rcha.aux.get_data(2) + assert aux_data_2 is None + aux_data_3 = rcha.aux.get_data(3) + assert aux_data_3[0][0][0] == 200.0 + + # TODO + wel_q_array = wel.q.array + assert wel_q_array[1][0][0, 0, 0] == 0.25 + assert wel_q_array[2][0][0, 0, 0] == 0.1 + # assert wel_array[3][0][1] == 0.1 + welg_q_per = wel.q.get_data() + assert welg_q_per[0] is None + assert welg_q_per[1][0, 0, 0] == 0.25 + assert welg_q_per[2][0, 0, 0] == 0.1 + # assert welg_q_per[3][0, 0, 0] == 0.1 + + welg_q_per1 = wel.q.get_data(1) + # print(wel.q.array) + assert welg_q_per1[0, 0, 0] == 0.25 + welg_aux_per1 = wel.aux.get_data(1) + assert welg_aux_per1[0][0, 0, 0] == 0.0 + + drn_array = drn.stress_period_data.array + assert drn_array[0][0][1] == 60.0 + assert drn_array[1][0][1] == 60.0 + assert drn_array[2] is None + assert drn_array[3][0][1] == 55.0 + drn_gd_0 = drn.stress_period_data.get_data(0) + assert drn_gd_0[0][1] == 60.0 + drn_gd_1 = drn.stress_period_data.get_data(1) + assert drn_gd_1 is None + drn_gd_2 = drn.stress_period_data.get_data(2) + assert len(drn_gd_2) == 0 + drn_gd_3 = drn.stress_period_data.get_data(3) + assert drn_gd_3[0][1] == 55.0 + + lak_tab_array = lak.tables.get_data() + assert lak_tab_array[0][1] == "lak01.tab" + assert lak_tab_array[1][1] == "lak02.tab" + + assert len(lak_tab) == 2 + lak_tab_1 = lak_tab[0].table.get_data() + assert lak_tab_1[0][0] == 30.0 + assert lak_tab_1[5][2] == 20000.0 + lak_tab_2 = lak_tab[1].table.get_data() + assert lak_tab_2[0][0] == 40.0 + assert lak_tab_2[4][1] == 503000.0 + + @requires_exe("mf6") def test_multi_model(function_tmpdir): # init paths diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index 63642bbc1e..a51df58e49 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -734,6 +734,8 @@ def _get_data(self, layer=None, apply_mult=False, **kwargs): and kwargs["array"] and isinstance(self, MFTransientArray) and data is not [] # noqa: F632 + and not self._is_grid_aux() + and not self._is_layered_aux() ): data = np.expand_dims(data, 0) return data @@ -1131,6 +1133,16 @@ def _is_layered_aux(self): else: return False + def _is_grid_aux(self): + # determine if this is a full grid aux variable + if ( + self.structure.name.lower() == "aux" + and self.structure.layered + ): + return True + else: + return 
False + def get_file_entry( self, layer=None, ext_file_action=ExtFileAction.copy_relative_paths ): @@ -1469,7 +1481,12 @@ def _get_data_layer_string(self, layer, data_indent): self._path, self._current_key, ) - return file_access.get_data_string(data, self._data_type, data_indent) + if self._is_grid_aux(): + return file_access.get_data_string( + [a.ravel().tolist() for a in data], self._data_type, data_indent + ) + else: + return file_access.get_data_string(data, self._data_type, data_indent) def _resolve_layer_index(self, layer, allow_multiple_layers=False): # handle layered vs non-layered data @@ -1796,11 +1813,13 @@ def _get_array(self, num_sp, apply_mult, **kwargs): """ data = None output = None + baseshape = None for sp in range(0, num_sp): if sp in self._data_storage: self.get_data_prep(sp) data = super().get_data(apply_mult=apply_mult, **kwargs) - data = np.expand_dims(data, 0) + if not (self._is_grid_aux() or self._is_layered_aux()): + data = np.expand_dims(data, 0) else: # if there is no previous data provide array of # zeros, otherwise provide last array of data found @@ -1818,11 +1837,29 @@ def _get_array(self, num_sp, apply_mult, **kwargs): data = np.full_like(data, 1) else: data = np.full_like(data, 0.0) - data = np.expand_dims(data, 0) + if not (self._is_grid_aux() or self._is_layered_aux()): + data = np.expand_dims(data, 0) if output is None or data is None: output = data + if data is not None: + baseshape = np.shape(data) + if self._is_grid_aux(): + output = np.expand_dims(output, 0) else: - output = np.concatenate((output, data)) + if np.all(output == None): + baseshape = np.shape(data) + output = np.full(np.shape(data), np.nan, self.dtype) + output = np.concatenate((output, data)) + if self._is_grid_aux(): + output = np.expand_dims(output, 0) + elif np.all(data == None): + anone = np.full(baseshape, np.nan, self.dtype) + output = np.append(output, anone, axis=0) + else: + if self._is_grid_aux() and np.shape(data) == baseshape: + data = np.expand_dims(data, 0) + output = np.concatenate((output, data)) + return output def has_data(self, layer=None): diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index d806bb98df..3c47eabd21 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2379,10 +2379,15 @@ def _build_full_data(self, apply_multiplier=False): self.data_dimensions.structure.name == "aux" and self.data_dimensions.structure.layered ) - if layer_aux: + if layer_aux or grid_aux: aux_data = [] if not self.layered: layers_to_process = [0] + elif grid_aux: + layers_to_process = [] + auxvar = self.data_dimensions.package_dim.get_aux_variables()[0] + for i in range(len(auxvar) - 1): + layers_to_process.append(i) else: layers_to_process = self.layer_storage.indexes() for layer in layers_to_process: @@ -2420,6 +2425,7 @@ def _build_full_data(self, apply_multiplier=False): full_data = ( self.layer_storage[layer].internal_data * mult ) + aux_data.append(full_data) else: full_data[layer] = ( self.layer_storage[layer].internal_data * mult @@ -2434,6 +2440,9 @@ def _build_full_data(self, apply_multiplier=False): or not self._has_layer_dim() ): full_data = self._fill_const_layer(layer) * mult + elif grid_aux: + full_data = self._fill_const_grid(layer) * mult + aux_data.append(full_data) else: full_data[layer] = self._fill_const_layer(layer) * mult else: @@ -2499,6 +2508,11 @@ def _build_full_data(self, apply_multiplier=False): return None else: return np.stack(aux_data, axis=0) + elif grid_aux: + if len(aux_data) == 0: + 
return [full_data] + else: + return aux_data else: return full_data @@ -2527,6 +2541,18 @@ def _fill_const_layer(self, layer): ) return np.full(data_dimensions, ls.data_const_value[0], data_type) + def _fill_const_grid(self, layer): + data_dimensions = self.get_data_dimensions(None) + #ls = self.layer_storage.first_item() + ls = self.layer_storage[layer] + if data_dimensions[0] < 0: + return ls.data_const_value[0] + else: + data_type = self.data_dimensions.structure.get_datum_type( + numpy_type=True + ) + return np.full(data_dimensions, ls.data_const_value[0], data_type) + def _is_type(self, data_item, data_type): if data_type == DatumType.string or data_type == DatumType.keyword: return True @@ -3012,12 +3038,15 @@ def _get_max_min_data_line_size(data): def get_data_dimensions(self, layer): data_dimensions = self.data_dimensions.get_data_shape()[0] - is_aux = self.data_dimensions.structure.name == "aux" + grid_aux = ( + self.data_dimensions.structure.name == "aux" + and self.data_dimensions.structure.layered + ) if ( - not is_aux - and layer is not None + layer is not None and self.layer_storage.get_total_size() > 1 and self._has_layer_dim() + and not grid_aux ): # remove all "layer" dimensions from the list layer_dims = self.data_dimensions.structure.data_item_structures[ diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index af7e13f8d3..6047b56b00 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1325,16 +1325,13 @@ def write( with relative paths, leaving files defined by absolute paths fixed. netcdf : str ASCII package files will be written as configured for NetCDF input. - 'layered' and 'structured' are supported types. + 'layered', 'structured' and 'nofile' are supported arguments. """ - write_netcdf = netcdf and ( - self.model_type == "gwf6" - or self.model_type == "gwt6" - or self.model_type == "gwe6" - ) + write_netcdf = netcdf and hasattr(self.name_file, "nc_filerecord") if write_netcdf: + # update name file for input even if "nofile" is configured nc_fname = None if self.name_file.nc_filerecord.get_data() is None: # update name file for netcdf input @@ -1387,6 +1384,8 @@ def write( nc_info = self.netcdf_info(mesh=mesh) nc_info["attrs"]["title"] = f"{self.name.upper()} input" nc_info["attrs"]["source"] = f"flopy {__version__}" + # :history = "first created 2025/8/21 9:46:2.909" ; + # :Conventions = "CF-1.11 UGRID-1.0" ; ds = self.update_dataset(ds, netcdf_info=nc_info, mesh=mesh) # write dataset to netcdf diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index da9c3d604d..c0b6db12f2 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3649,7 +3649,7 @@ def netcdf_info(self, mesh=None): return entries def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True): - from ..discretization.vertexgrid import VertexGrid + from ..discretization.structuredgrid import StructuredGrid if netcdf_info is None: nc_info = self.netcdf_info(mesh=mesh) else: @@ -3658,35 +3658,32 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True) modelgrid = self.model_or_sim.modelgrid modeltime = self.model_or_sim.modeltime - if isinstance(modelgrid, VertexGrid): - dimmap = { - "time": sum(modeltime.nstp), - "z": modelgrid.nlay, - "nmesh_face": modelgrid.ncpl, - "ncpl": modelgrid.ncpl, - } - else: - dimmap = { - "time": sum(modeltime.nstp), - "z": modelgrid.nlay, - "y": modelgrid.nrow, - "x": modelgrid.ncol, - "nmesh_face": modelgrid.ncpl, - "ncpl": modelgrid.ncpl, - } - - def _update_data(nc_info, key, pobj=None, 
data=None): + dimmap = { + "time": sum(modeltime.nstp), + "z": modelgrid.nlay, + "nmesh_face": modelgrid.ncpl, + "ncpl": modelgrid.ncpl, + } + + if isinstance(modelgrid, StructuredGrid): + dimmap["y"] = modelgrid.nrow + dimmap["x"] = modelgrid.ncol + + def _update_data(nc_info, key, dobj=None, data=None): + from types import MappingProxyType if "modflow_iaux" in nc_info[key]["attrs"]: iaux = nc_info[key]["attrs"]["modflow_iaux"] - 1 else: iaux = -1 if mesh == None: - if pobj.repeating: + if dobj.repeating: if iaux >= 0: - for k in pobj._data_storage.keys(): + auxkeys = list(data) + for k in auxkeys: + if data[k] is None: + continue istp = sum(modeltime.nstp[0:k]) - pobj.get_data_prep(k) - auxdata = pobj._data_storage[k].get_data(iaux) + auxdata = data[k][iaux] dataset[nc_info[key]["varname"]].values[istp, :] = ( auxdata) else: @@ -3695,8 +3692,8 @@ def _update_data(nc_info, key, pobj=None, data=None): continue istp = sum(modeltime.nstp[0:per]) if ( - pobj.structure.data_item_structures[0].numeric_index or - pobj.structure.data_item_structures[0].is_cellid): + dobj.structure.data_item_structures[0].numeric_index or + dobj.structure.data_item_structures[0].is_cellid): dataset[nc_info[key]["varname"]].values[istp, :] = ( data[per] + 1) else: @@ -3709,19 +3706,19 @@ def _update_data(nc_info, key, pobj=None, data=None): layer = nc_info[key]["attrs"]["layer"] - 1 else: layer = -1 - if pobj.repeating: + if dobj.repeating: if iaux >= 0: - for k in pobj._data_storage.keys(): + auxkeys = list(data) + for k in auxkeys: + if data[k] is None: + continue + auxdata = data[k][iaux] istp = sum(modeltime.nstp[0:k]) - pobj.get_data_prep(k) - auxdata = pobj._data_storage[k].get_data(iaux) if self.structure.read_as_arrays: - uidx = istp + auxdata.size - dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + dataset[nc_info[key]["varname"]].values[istp, :] = ( auxdata.flatten()) elif self.structure.read_array_grid: - uidx = istp + auxdata[layer].size - dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + dataset[nc_info[key]["varname"]].values[istp, :] = ( auxdata[layer].flatten()) else: for per in data: @@ -3729,25 +3726,23 @@ def _update_data(nc_info, key, pobj=None, data=None): continue istp = sum(modeltime.nstp[0:per]) if layer >= 0 and ( - len(pobj.structure.shape) == 3 or - pobj.structure.shape[0] == 'nodes'): - uidx = istp + data[per][layer].size - dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + len(dobj.structure.shape) == 3 or + dobj.structure.shape[0] == 'nodes'): + dataset[nc_info[key]["varname"]].values[istp, :] = ( data[per][layer].flatten()) else: - uidx = istp + data[per].size if ( - pobj.structure.data_item_structures[0].numeric_index or - pobj.structure.data_item_structures[0].is_cellid): - dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + dobj.structure.data_item_structures[0].numeric_index or + dobj.structure.data_item_structures[0].is_cellid): + dataset[nc_info[key]["varname"]].values[istp, :] = ( data[per].flatten() + 1) else: - dataset[nc_info[key]["varname"]].values[istp:uidx] = ( + dataset[nc_info[key]["varname"]].values[istp, :] = ( data[per].flatten()) else: if layer >= 0 and ( - 'nlay' in pobj.structure.shape or - pobj.structure.shape[0] == 'nodes'): + 'nlay' in dobj.structure.shape or + dobj.structure.shape[0] == 'nodes'): dataset[nc_info[key]["varname"]].values = ( data[layer].flatten()) else: @@ -3763,7 +3758,7 @@ def _data_shape(shape): return dims_l last_path = '' - pkg = None + pitem = None pdata = None for v in nc_info: varname = nc_info[v]["varname"] @@ 
-3781,13 +3776,12 @@ def _data_shape(shape):
                     path = nc_info[v]["attrs"]["modflow_input"]
                     tag = path.split("/")[2].lower()
                     if path != last_path:
-                        pkg = None
+                        pitem = None
                         pdata = None
-                        pkg = getattr(self, tag)
-                        if tag != "aux":
-                            pdata = pkg.get_data()
+                        pitem = getattr(self, tag)
+                        pdata = pitem.get_data()
                         last_path = path
-                    _update_data(nc_info, v, pkg, pdata)
+                    _update_data(nc_info, v, pitem, pdata)
 
         return dataset
 
diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py
index 0840a0e6a6..5ec4f4be44 100644
--- a/flopy/mf6/mfsimbase.py
+++ b/flopy/mf6/mfsimbase.py
@@ -1675,7 +1675,7 @@ def write_simulation(
             Writes out the simulation in silent mode (verbosity_level = 0)
         netcdf : str
             ASCII package files will be written as configured for NetCDF input.
-            'layered' and 'structured' are supported types.
+            'layered', 'structured' and 'nofile' are supported arguments.
         """
         sim_data = self.simulation_data
 

From 7baedf82725d2bf24e8c0b7b6de5476cfb868017 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Tue, 26 Aug 2025 17:29:27 -0400
Subject: [PATCH 26/44] generate classes from modflow

---
 .github/workflows/commit.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index 299b55e74d..e294c4cbb9 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -166,7 +166,6 @@ jobs:
         working-directory: flopy
         run: |
           pixi run --manifest-path=../modflow6/pixi.toml pip install --no-deps -e .
-          pixi run --manifest-path=../modflow6/pixi.toml python -m flopy.mf6.utils.generate_classes --dfnpath ../modflow6/doc/mf6io/mf6ivar/dfn
 
       - name: Build MF6
         working-directory: modflow6
@@ -175,6 +174,10 @@
           pixi run meson install -C builddir
           pixi run meson test --verbose --no-rebuild -C builddir
 
+      - name: Update FloPy packages
+        working-directory: modflow6
+        run: python -m flopy.mf6.utils.generate_classes --dfnpath doc/mf6io/mf6ivar/dfn
+
       - name: Run tests
         working-directory: flopy/autotest
         run: |

From a201d2c1a231ec070b0458413d3127fa2b2a086c Mon Sep 17 00:00:00 2001
From: mjreno
Date: Wed, 27 Aug 2025 07:51:54 -0400
Subject: [PATCH 27/44] temporarily mark slow

---
 autotest/test_mf6.py       |    1 +
 autotest/tmp/test_mf6.1.py | 2937 ++++++++++++++++++++++++++++++++++++
 autotest/tmp/test_mf6.py   | 2930 +++++++++++++++++++++++++++++++++++
 3 files changed, 5868 insertions(+)
 create mode 100644 autotest/tmp/test_mf6.1.py
 create mode 100644 autotest/tmp/test_mf6.py

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 4efffef3b3..42edfbff85 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -1963,6 +1963,7 @@ def test_array(function_tmpdir):
 
 
 @requires_exe("mf6")
+@pytest.mark.slow
 def test_grid_array(function_tmpdir):
     # get_data
     # empty data in period block vs data repeating
diff --git a/autotest/tmp/test_mf6.1.py b/autotest/tmp/test_mf6.1.py
new file mode 100644
index 0000000000..b1b91b95b7
--- /dev/null
+++ b/autotest/tmp/test_mf6.1.py
@@ -0,0 +1,2937 @@
+import os
+import platform
+from pathlib import Path
+from shutil import copytree, which
+
+import numpy as np
+import pytest
+from modflow_devtools.markers import requires_exe, requires_pkg
+from modflow_devtools.misc import set_dir
+
+import flopy
+from flopy.mf6 import (
+    MFModel,
+    MFSimulation,
+    ModflowGwf,
+    ModflowGwfchd,
+    ModflowGwfdis,
+    ModflowGwfdisu,
+    ModflowGwfdisv,
+    ModflowGwfdrn,
+    ModflowGwfevt,
+    ModflowGwfevta,
+    ModflowGwfghb,
+    ModflowGwfgnc,
+    ModflowGwfgwf,
+    ModflowGwfgwt,
+    ModflowGwfhfb,
+    ModflowGwfic,
+    ModflowGwflak,
+    
ModflowGwfmaw, + ModflowGwfmvr, + ModflowGwfnam, + ModflowGwfnpf, + ModflowGwfoc, + ModflowGwfrch, + ModflowGwfrcha, + ModflowGwfriv, + ModflowGwfsfr, + ModflowGwfsto, + ModflowGwfuzf, + ModflowGwfwel, + ModflowGwfwelg, + ModflowGwtadv, + ModflowGwtdis, + ModflowGwtic, + ModflowGwtmst, + ModflowGwtoc, + ModflowGwtssm, + ModflowIms, + ModflowNam, + ModflowTdis, + ModflowUtllaktab, + ModflowUtlspca, +) +from flopy.mf6.coordinates.modeldimensions import ( + DataDimensions, + ModelDimensions, + PackageDimensions, +) +from flopy.mf6.data.mffileaccess import MFFileAccessArray +from flopy.mf6.data.mfstructure import MFDataItemStructure, MFDataStructure +from flopy.mf6.mfsimbase import MFSimulationData +from flopy.mf6.modflow import ( + mfgwf, + mfgwfdis, + mfgwfdrn, + mfgwfic, + mfgwfnpf, + mfgwfoc, + mfgwfriv, + mfgwfsto, + mfgwfwel, + mfims, + mftdis, +) +from flopy.utils import CellBudgetFile, HeadFile, Mf6ListBudget, Mf6Obs, ZoneBudget6 +from flopy.utils.observationfile import CsvFile +from flopy.utils.triangle import Triangle +from flopy.utils.voronoi import VoronoiGrid + +pytestmark = pytest.mark.mf6 + + +def write_head( + fbin, + data, + kstp=1, + kper=1, + pertim=1.0, + totim=1.0, + text=" HEAD", + ilay=1, +): + dt = np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f8"), + ("totim", "f8"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + nrow = data.shape[0] + ncol = data.shape[1] + h = np.array((kstp, kper, pertim, totim, text, ncol, nrow, ilay), dtype=dt) + h.tofile(fbin) + data.tofile(fbin) + + +def get_gwf_model(sim, gwfname, gwfpath, modelshape, chdspd=None, welspd=None): + nlay, nrow, ncol = modelshape + delr = 1.0 + delc = 1.0 + top = 1.0 + botm = [0.0] + strt = 1.0 + hk = 1.0 + laytyp = 0 + + gwf = ModflowGwf( + sim, + modelname=gwfname, + save_flows=True, + ) + gwf.set_model_relative_path(gwfpath) + + dis = ModflowGwfdis( + gwf, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + # initial conditions + ic = ModflowGwfic(gwf, strt=strt) + + # node property flow + npf = ModflowGwfnpf( + gwf, + icelltype=laytyp, + k=hk, + save_specific_discharge=True, + ) + + # chd files + if chdspd is not None: + chd = ModflowGwfchd( + gwf, + stress_period_data=chdspd, + save_flows=False, + pname="CHD-1", + ) + + # wel files + if welspd is not None: + wel = ModflowGwfwel( + gwf, + print_input=True, + print_flows=True, + stress_period_data=welspd, + save_flows=False, + auxiliary="CONCENTRATION", + pname="WEL-1", + ) + + # output control + oc = ModflowGwfoc( + gwf, + budget_filerecord=f"{gwfname}.cbc", + head_filerecord=f"{gwfname}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], + printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], + ) + return gwf + + +def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None): + nlay, nrow, ncol = modelshape + delr = 1.0 + delc = 1.0 + top = 1.0 + botm = [0.0] + strt = 1.0 + hk = 1.0 + laytyp = 0 + + gwt = MFModel( + sim, + model_type="gwt6", + modelname=gwtname, + model_rel_path=gwtpath, + ) + gwt.name_file.save_flows = True + + dis = ModflowGwtdis( + gwt, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + # initial conditions + ic = ModflowGwtic(gwt, strt=0.0) + + # advection + adv = ModflowGwtadv(gwt, scheme="upstream") + + # mass storage and transfer + mst = ModflowGwtmst(gwt, porosity=0.1) + + # sources + ssm = 
ModflowGwtssm(gwt, sources=sourcerecarray) + + # output control + oc = ModflowGwtoc( + gwt, + budget_filerecord=f"{gwtname}.cbc", + concentration_filerecord=f"{gwtname}.ucn", + concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], + printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], + ) + return gwt + + +def to_win_sep(s): + return s.replace("/", "\\") + + +def to_posix_sep(s): + return s.replace("\\", "/") + + +def to_os_sep(s): + return s.replace("\\", os.sep).replace("/", os.sep) + + +@requires_exe("mf6") +def test_load_and_run_sim_when_namefile_uses_filenames( + function_tmpdir, example_data_path +): + # copy model input files to temp workspace + model_name = "mf6-freyberg" + workspace = function_tmpdir / model_name + copytree(example_data_path / model_name, workspace) + + # load, check and run simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + success, _ = sim.run_simulation(report=True) + assert success + + +@requires_exe("mf6") +def test_load_and_run_sim_when_namefile_uses_abs_paths( + function_tmpdir, example_data_path +): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # sub abs paths into namefile + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." + if pattern in l: + l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern) + f.write(l) + + # load, check and run simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + success, _ = sim.run_simulation(report=True) + assert success + + +@requires_exe("mf6") +@pytest.mark.parametrize("sep", ["win", "posix"]) +def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # sub rel paths into namefile + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." + if pattern in l: + if sep == "win": + l = to_win_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + else: + l = to_posix_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + f.write(l) + + # load and check simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + + # don't run simulation with Windows sep on Linux or Mac + if sep == "win" and platform.system() != "Windows": + return + + # run simulation + success, _ = sim.run_simulation(report=True) + assert success + + +@pytest.mark.skip(reason="currently flopy uses OS-specific path separators") +@pytest.mark.parametrize("sep", ["win", "posix"]) +def test_write_simulation_always_writes_posix_path_separators( + function_tmpdir, example_data_path, sep +): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # use OS-specific path separators + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." 
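+                # rewrite each model file reference as a relative path using
+                # the separator style under test ("win" -> backslashes,
+                # otherwise posix forward slashes)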
+ if pattern in l: + if sep == "win": + l = to_win_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + else: + l = to_posix_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + f.write(l) + + # load and write simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.write_simulation() + + # make sure posix separators were written + lines = open(workspace / "mfsim.nam").readlines() + assert all("\\" not in l for l in lines) + + +@requires_exe("mf6") +@pytest.mark.parametrize("filename", ["name", "rel", "rel_win"]) +def test_basic_gwf(function_tmpdir, filename): + ws = function_tmpdir + name = "basic_gwf_prep" + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6") + pd = [(1.0, 1, 1.0), (1.0, 1, 1.0)] + + innerdir = Path(function_tmpdir / "inner") + innerdir.mkdir() + + # mfpackage filename can be path or string.. + # if string, it can either be a file name or + # path relative to the simulation workspace. + tdis_name = f"{name}.tdis" + tdis_path = innerdir / tdis_name + tdis_path.touch() + tdis_relpath = tdis_path.relative_to(ws).as_posix() + tdis_relpath_win = str(tdis_relpath).replace("/", "\\") + + if filename == "name": + # file named with no path will be created in simulation workspace + tdis = flopy.mf6.ModflowTdis( + sim, nper=len(pd), perioddata=pd, filename=tdis_name + ) + assert tdis.filename == tdis_name + elif filename == "rel": + # filename may be a relative pathlib.Path + tdis = flopy.mf6.ModflowTdis( + sim, nper=len(pd), perioddata=pd, filename=tdis_relpath + ) + assert tdis.filename == str(tdis_relpath) + + # relative paths may also be provided as strings + tdis = flopy.mf6.ModflowTdis( + sim, nper=len(pd), perioddata=pd, filename=str(tdis_relpath) + ) + assert tdis.filename == str(tdis_relpath) + elif filename == "rel_win": + # windows path backslash separator should be converted to forward slash + tdis = flopy.mf6.ModflowTdis( + sim, nper=len(pd), perioddata=pd, filename=tdis_relpath_win + ) + assert tdis.filename == str(tdis_relpath) + + # create other packages + ims = flopy.mf6.ModflowIms(sim) + gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) + dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10) + ic = flopy.mf6.ModflowGwfic(gwf) + npf = flopy.mf6.ModflowGwfnpf( + gwf, save_specific_discharge=True, save_saturation=True + ) + spd = { + 0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]], + 1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]], + } + chd = flopy.mf6.ModflowGwfchd( + gwf, pname="CHD-1", stress_period_data=spd, auxiliary=["concentration"] + ) + budget_file = f"{name}.bud" + head_file = f"{name}.hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=budget_file, + head_filerecord=head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + # write the simulation + sim.write_simulation() + + # check for input files + assert (ws / innerdir / tdis_name).is_file() + assert (ws / f"{name}.ims").is_file() + assert (ws / f"{name}.dis").is_file() + assert (ws / f"{name}.ic").is_file() + assert (ws / f"{name}.npf").is_file() + assert (ws / f"{name}.chd").is_file() + assert (ws / f"{name}.oc").is_file() + + # run the simulation + sim.run_simulation() + + # check for output files + assert (ws / budget_file).is_file() + assert (ws / head_file).is_file() + + +def test_subdir(function_tmpdir): + sim = MFSimulation(sim_ws=function_tmpdir) + assert sim.sim_path == function_tmpdir + + tdis = ModflowTdis(sim) + gwf = ModflowGwf(sim, model_rel_path="level2") + 
ims = ModflowIms(sim)
+    sim.register_ims_package(ims, [])
+    dis = ModflowGwfdis(gwf)
+    sim.set_all_data_external(external_data_folder="dat")
+    sim.write_simulation()
+
+    sim_r = MFSimulation.load(
+        "mfsim.nam",
+        sim_ws=sim.simulation_data.mfpath.get_sim_path(),
+    )
+    gwf_r = sim_r.get_model()
+    assert gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry(), (
+        "Something wrong with model external paths"
+    )
+
+    sim_r.set_all_data_internal()
+    sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
+    sim_r.write_simulation()
+
+    sim_r2 = MFSimulation.load(
+        "mfsim.nam",
+        sim_ws=sim_r.simulation_data.mfpath.get_sim_path(),
+    )
+    gwf_r2 = sim_r2.get_model()
+    assert gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry(), (
+        "Something wrong with model external paths"
+    )
+
+
+@requires_exe("mf6")
+@pytest.mark.parametrize("layered", [True, False])
+def test_binary_write(function_tmpdir, layered):
+    nlay, nrow, ncol = 2, 1, 10
+    shape2d = (nrow, ncol)
+
+    # data for layers
+    botm = [4.0, 0.0]
+    strt = [5.0, 10.0]
+
+    # create structured binary data
+    if layered:
+        idomain_data = []
+        botm_data = []
+        strt_data = []
+        for k in range(nlay):
+            idomain_data.append(
+                {
+                    "factor": 1.0,
+                    "filename": f"idomain_l{k + 1}.bin",
+                    "data": 1,
+                    "binary": True,
+                    "iprn": 1,
+                }
+            )
+            botm_data.append(
+                {
+                    "filename": f"botm_l{k + 1}.bin",
+                    "binary": True,
+                    "iprn": 1,
+                    "data": np.full(shape2d, botm[k], dtype=float),
+                }
+            )
+            strt_data.append(
+                {
+                    "filename": f"strt_l{k + 1}.bin",
+                    "binary": True,
+                    "iprn": 1,
+                    "data": np.full(shape2d, strt[k], dtype=float),
+                }
+            )
+    else:
+        idomain_data = {
+            "filename": "idomain.bin",
+            "binary": True,
+            "iprn": 1,
+            "data": 1,
+        }
+        botm_data = {
+            "filename": "botm.bin",
+            "binary": True,
+            "iprn": 1,
+            "data": np.array(
+                [
+                    np.full(shape2d, botm[0], dtype=float),
+                    np.full(shape2d, botm[1], dtype=float),
+                ]
+            ),
+        }
+        strt_data = {
+            "filename": "strt.bin",
+            "binary": True,
+            "iprn": 1,
+            "data": np.array(
+                [
+                    np.full(shape2d, strt[0], dtype=float),
+                    np.full(shape2d, strt[1], dtype=float),
+                ]
+            ),
+        }
+
+    # binary data that does not vary by layers
+    top_data = {
+        "filename": "top.bin",
+        "binary": True,
+        "iprn": 1,
+        "data": 10.0,
+    }
+    rch_data = {
+        0: {
+            "filename": "recharge.bin",
+            "binary": True,
+            "iprn": 1,
+            "data": 0.000001,
+        },
+    }
+    chd_data = [
+        (1, 0, 0, 10.0, 1.0, 100.0),
+        (1, 0, ncol - 1, 5.0, 0.0, 100.0),
+    ]
+    chd_data = {
+        0: {
+            "filename": "chd.bin",
+            "binary": True,
+            "iprn": 1,
+            "data": chd_data,
+        },
+    }
+
+    sim = MFSimulation(sim_ws=str(function_tmpdir))
+    ModflowTdis(sim)
+    ModflowIms(sim, complexity="simple")
+    gwf = ModflowGwf(sim, print_input=True)
+    ModflowGwfdis(
+        gwf,
+        nlay=nlay,
+        nrow=nrow,
+        ncol=ncol,
+        delr=1.0,
+        delc=1.0,
+        top=top_data,
+        botm=botm_data,
+        idomain=idomain_data,
+    )
+    ModflowGwfnpf(
+        gwf,
+        icelltype=1,
+    )
+    ModflowGwfic(
+        gwf,
+        strt=strt_data,
+    )
+    ModflowGwfchd(
+        gwf,
+        auxiliary=["conc", "something"],
+        stress_period_data=chd_data,
+    )
+    ModflowGwfrcha(gwf, recharge=rch_data)
+
+    sim.write_simulation()
+    success, buff = sim.run_simulation()
+    assert success
+
+
+@requires_exe("mf6")
+@requires_pkg("shapely", "scipy")
+@pytest.mark.parametrize("layered", [True, False])
+def test_vor_binary_write(function_tmpdir, layered):
+    # build voronoi grid
+    boundary = [(0.0, 0.0), (0.0, 1.0), (10.0, 1.0), (10.0, 0.0)]
+    triangle_ws = function_tmpdir / "triangle"
+    triangle_ws.mkdir(parents=True, exist_ok=True)
+
+    tri = Triangle(
angle=30, + maximum_area=1.0, + model_ws=triangle_ws, + ) + tri.add_polygon(boundary) + tri.build(verbose=False) + vor = VoronoiGrid(tri) + + # problem dimensions + nlay = 2 + + # data for layers + botm = [4.0, 0.0] + strt = [5.0, 10.0] + + # build binary data + if layered: + idomain_data = [] + botm_data = [] + strt_data = [] + for k in range(nlay): + idomain_data.append( + { + "factor": 1.0, + "filename": f"idomain_l{k + 1}.bin", + "data": 1, + "binary": True, + "iprn": 1, + } + ) + botm_data.append( + { + "filename": f"botm_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, botm[k], dtype=float), + } + ) + strt_data.append( + { + "filename": f"strt_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, strt[k], dtype=float), + } + ) + else: + idomain_data = { + "filename": "idomain.bin", + "binary": True, + "iprn": 1, + "data": 1, + } + botm_data = { + "filename": "botm.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(vor.ncpl, botm[0], dtype=float), + np.full(vor.ncpl, botm[1], dtype=float), + ] + ), + } + strt_data = { + "filename": "strt.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(vor.ncpl, strt[0], dtype=float), + np.full(vor.ncpl, strt[1], dtype=float), + ] + ), + } + + # binary data that does not vary by layers + top_data = { + "filename": "top.bin", + "binary": True, + "iprn": 1, + "data": 10.0, + } + rch_data = { + 0: { + "filename": "recharge.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, 0.000001, dtype=float), + }, + } + chd_data = [ + (1, 0, 10.0, 1.0, 100.0), + (1, 1, 10.0, 1.0, 100.0), + (1, 2, 5.0, 0.0, 100.0), + (1, 3, 5.0, 0.0, 100.0), + ] + chd_data = { + 0: { + "filename": "chd.bin", + "binary": True, + "data": chd_data, + }, + } + + # build model + sim = MFSimulation(sim_ws=str(function_tmpdir)) + ModflowTdis(sim) + ModflowIms(sim, complexity="simple") + gwf = ModflowGwf(sim, print_input=True) + flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=vor.ncpl, + nvert=vor.nverts, + vertices=vor.get_disv_gridprops()["vertices"], + cell2d=vor.get_disv_gridprops()["cell2d"], + top=top_data, + botm=botm_data, + idomain=idomain_data, + xorigin=0.0, + yorigin=0.0, + ) + ModflowGwfnpf( + gwf, + icelltype=1, + ) + ModflowGwfic( + gwf, + strt=strt_data, + ) + ModflowGwfrcha(gwf, recharge=rch_data) + ModflowGwfchd( + gwf, + auxiliary=["conc", "something"], + stress_period_data=chd_data, + ) + sim.write_simulation() + success, buff = sim.run_simulation() + assert success + + +def test_binary_read(function_tmpdir): + test_ex_name = "binary_read" + nlay = 3 + nrow = 10 + ncol = 10 + + modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol) + + arr = np.arange(nlay * nrow * ncol).astype(np.float64) + data_shape = (nlay, nrow, ncol) + data_size = nlay * nrow * ncol + arr.shape = data_shape + + sim_data = MFSimulationData("integration", None) + dstruct = MFDataItemStructure() + dstruct.is_cellid = False + dstruct.name = "fake" + dstruct.data_items = [ + None, + ] + mfstruct = MFDataStructure(dstruct, False, "ic", None) + mfstruct.data_item_structures = [ + dstruct, + ] + mfstruct.path = [ + "fake", + ] + + md = ModelDimensions("test", None) + pd = PackageDimensions([md], None, "integration") + dd = DataDimensions(pd, mfstruct) + + binfile = function_tmpdir / "structured_layered.hds" + with open(binfile, "wb") as foo: + for ix, a in enumerate(arr): + write_head(foo, a, ilay=ix) + + fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) + + # test path 
as both Path and str
+    for bf in [binfile, str(binfile)]:
+        arr2 = fa.read_binary_data_from_file(
+            bf, data_shape, data_size, np.float64, modelgrid
+        )[0]
+
+        assert np.allclose(arr, arr2), (
+            "Binary read for layered structured failed with "
+            + ("Path" if isinstance(bf, Path) else "str")
+        )
+
+    binfile = function_tmpdir / "structured_flat.hds"
+    with open(binfile, "wb") as foo:
+        a = np.expand_dims(np.ravel(arr), axis=0)
+        write_head(foo, a, ilay=1)
+
+    arr2 = fa.read_binary_data_from_file(
+        binfile, data_shape, data_size, np.float64, modelgrid
+    )[0]
+
+    assert np.allclose(arr, arr2), "Binary read for flat Structured failed"
+
+    ncpl = nrow * ncol
+    data_shape = (nlay, ncpl)
+    arr.shape = data_shape
+    modelgrid = flopy.discretization.VertexGrid(nlay=nlay, ncpl=ncpl)
+
+    fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None)
+
+    binfile = function_tmpdir / "vertex_layered.hds"
+    with open(binfile, "wb") as foo:
+        tarr = arr.reshape((nlay, 1, ncpl))
+        for ix, a in enumerate(tarr):
+            write_head(foo, a, ilay=ix)
+
+    arr2 = fa.read_binary_data_from_file(
+        binfile, data_shape, data_size, np.float64, modelgrid
+    )[0]
+
+    assert np.allclose(arr, arr2), "Binary read for layered Vertex failed"
+
+    binfile = function_tmpdir / "vertex_flat.hds"
+    with open(binfile, "wb") as foo:
+        a = np.expand_dims(np.ravel(arr), axis=0)
+        write_head(foo, a, ilay=1)
+
+    arr2 = fa.read_binary_data_from_file(
+        binfile, data_shape, data_size, np.float64, modelgrid
+    )[0]
+
+    assert np.allclose(arr, arr2), "Binary read for flat Vertex failed"
+
+    nlay = 3
+    ncpl = [50, 100, 150]
+    data_shape = (np.sum(ncpl),)
+    arr.shape = data_shape
+    modelgrid = flopy.discretization.UnstructuredGrid(ncpl=ncpl)
+
+    fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None)
+
+    binfile = function_tmpdir / "unstructured.hds"
+    with open(binfile, "wb") as foo:
+        a = np.expand_dims(arr, axis=0)
+        write_head(foo, a, ilay=1)
+
+    arr2 = fa.read_binary_data_from_file(
+        binfile, data_shape, data_size, np.float64, modelgrid
+    )[0]
+
+    assert np.allclose(arr, arr2), "Binary read for Unstructured failed"
+
+
+@requires_exe("mf6")
+def test_props_and_write(function_tmpdir):
+    # workspace as str
+    sim = MFSimulation(sim_ws=str(function_tmpdir))
+    assert isinstance(sim, MFSimulation)
+    assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path
+
+    # workspace as Path
+    sim = MFSimulation(sim_ws=function_tmpdir)
+    assert isinstance(sim, MFSimulation)
+    assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path
+
+    tdis = ModflowTdis(sim)
+    assert isinstance(tdis, ModflowTdis)
+
+    gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2")
+    assert isinstance(gwfgwf, ModflowGwfgwf)
+
+    gwf = ModflowGwf(sim)
+    assert isinstance(gwf, ModflowGwf)
+
+    ims = ModflowIms(sim)
+    assert isinstance(ims, ModflowIms)
+    sim.register_ims_package(ims, [])
+
+    dis = ModflowGwfdis(gwf)
+    assert isinstance(dis, ModflowGwfdis)
+
+    disu = ModflowGwfdisu(gwf)
+    assert isinstance(disu, ModflowGwfdisu)
+
+    disv = ModflowGwfdisv(gwf)
+    assert isinstance(disv, ModflowGwfdisv)
+
+    npf = ModflowGwfnpf(gwf)
+    assert isinstance(npf, ModflowGwfnpf)
+
+    ic = ModflowGwfic(gwf)
+    assert isinstance(ic, ModflowGwfic)
+
+    sto = ModflowGwfsto(gwf)
+    assert isinstance(sto, ModflowGwfsto)
+
+    hfb = ModflowGwfhfb(gwf)
+    assert isinstance(hfb, ModflowGwfhfb)
+
+    gnc = ModflowGwfgnc(gwf)
+    assert isinstance(gnc, ModflowGwfgnc)
+
+    chd = ModflowGwfchd(gwf)
+    assert isinstance(chd, ModflowGwfchd)
+
+    wel 
= ModflowGwfwel(gwf) + assert isinstance(wel, ModflowGwfwel) + + drn = ModflowGwfdrn(gwf) + assert isinstance(drn, ModflowGwfdrn) + + riv = ModflowGwfriv(gwf) + assert isinstance(riv, ModflowGwfriv) + + ghb = ModflowGwfghb(gwf) + assert isinstance(ghb, ModflowGwfghb) + + rch = ModflowGwfrch(gwf) + assert isinstance(rch, ModflowGwfrch) + + rcha = ModflowGwfrcha(gwf) + assert isinstance(rcha, ModflowGwfrcha) + + evt = ModflowGwfevt(gwf) + assert isinstance(evt, ModflowGwfevt) + + evta = ModflowGwfevta(gwf) + assert isinstance(evta, ModflowGwfevta) + + maw = ModflowGwfmaw(gwf) + assert isinstance(maw, ModflowGwfmaw) + + sfr = ModflowGwfsfr(gwf) + assert isinstance(sfr, ModflowGwfsfr) + + lak = ModflowGwflak(gwf) + assert isinstance(lak, ModflowGwflak) + + uzf = ModflowGwfuzf(gwf) + assert isinstance(uzf, ModflowGwfuzf) + + mvr = ModflowGwfmvr(gwf) + assert isinstance(mvr, ModflowGwfmvr) + + # Write files + sim.write_simulation() + + # Verify files were written + assert os.path.isfile(os.path.join(str(function_tmpdir), "mfsim.nam")) + exts_model = [ + "nam", + "dis", + "disu", + "disv", + "npf", + "ic", + "sto", + "hfb", + "gnc", + "chd", + "wel", + "drn", + "riv", + "ghb", + "rch", + "rcha", + "evt", + "evta", + "maw", + "sfr", + "lak", + "mvr", + ] + exts_sim = ["gwfgwf", "ims", "tdis"] + for ext in exts_model: + fname = os.path.join(str(function_tmpdir), f"model.{ext}") + assert os.path.isfile(fname), f"{fname} not found" + for ext in exts_sim: + fname = os.path.join(str(function_tmpdir), f"sim.{ext}") + assert os.path.isfile(fname), f"{fname} not found" + + +@pytest.mark.parametrize("use_paths", [True, False]) +def test_set_sim_path(function_tmpdir, use_paths): + sim_name = "testsim" + model_name = "testmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=exe_name, + sim_ws=function_tmpdir, + ) + + new_ws = function_tmpdir / "new_ws" + new_ws.mkdir() + sim.set_sim_path(new_ws if use_paths else str(new_ws)) + + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + sim.write_simulation() + + assert len([p for p in function_tmpdir.glob("*") if p.is_file()]) == 0 + assert len([p for p in new_ws.glob("*") if p.is_file()]) > 0 + + +@requires_exe("mf6") +@pytest.mark.parametrize("use_paths", [True, False]) +def test_create_and_run_model(function_tmpdir, use_paths): + # names + sim_name = "testsim" + model_name = "testmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + if use_paths: + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=Path(which(exe_name)), + sim_ws=function_tmpdir, + ) + else: + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=str(exe_name), + sim_ws=str(function_tmpdir), + ) + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + # create solution and add the model + ims_package = mfims.ModflowIms( + sim, + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.00001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.00001, + linear_acceleration="CG", + preconditioner_levels=7, + 
preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + sim.register_ims_package(ims_package, [model_name]) + + # add packages to model + dis_package = mfgwfdis.ModflowGwfdis( + model, + length_units="FEET", + nlay=1, + nrow=1, + ncol=10, + delr=500.0, + delc=500.0, + top=100.0, + botm=50.0, + filename=f"{model_name}.dis", + ) + ic_package = mfgwfic.ModflowGwfic( + model, + strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0], + filename=f"{model_name}.ic", + ) + npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) + + sto_package = mfgwfsto.ModflowGwfsto( + model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 + ) + + wel_package = mfgwfwel.ModflowGwfwel( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=2, + stress_period_data=[((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)], + ) + wel_package.stress_period_data.add_transient_key(1) + wel_package.stress_period_data.set_data([((0, 0, 4), -200.0)], 1) + + drn_package = mfgwfdrn.ModflowGwfdrn( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=1, + stress_period_data=[((0, 0, 0), 80, 60.0)], + ) + + riv_package = mfgwfriv.ModflowGwfriv( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=1, + stress_period_data=[((0, 0, 9), 110, 90.0, 100.0)], + ) + oc_package = mfgwfoc.ModflowGwfoc( + model, + budget_filerecord=[f"{model_name}.cbc"], + head_filerecord=[f"{model_name}.hds"], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + oc_package.saverecord.add_transient_key(1) + oc_package.saverecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) + oc_package.printrecord.add_transient_key(1) + oc_package.printrecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) + + # write the simulation input files + sim.write_simulation() + + # run the simulation and look for output + success, buff = sim.run_simulation() + assert success + + +@requires_exe("mf6") +def test_get_set_data_record(function_tmpdir): + # names + sim_name = "testrecordsim" + model_name = "testrecordmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=exe_name, + sim_ws=str(function_tmpdir), + ) + tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + # create solution and add the model + ims_package = mfims.ModflowIms( + sim, + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.00001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.00001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + sim.register_ims_package(ims_package, [model_name]) + + # add packages to model + dis_package = mfgwfdis.ModflowGwfdis( + model, + length_units="FEET", + nlay=3, + nrow=10, + ncol=10, + delr=500.0, + delc=500.0, + top=100.0, + botm=[50.0, 10.0, -50.0], + filename=f"{model_name}.dis", + ) + ic_package = mfgwfic.ModflowGwfic( + model, + strt=[100.0, 90.0, 80.0], + filename=f"{model_name}.ic", + ) + npf_package = mfgwfnpf.ModflowGwfnpf( + model, save_flows=True, icelltype=1, k=50.0, k33=1.0 + ) + + sto_package = mfgwfsto.ModflowGwfsto( + model, save_flows=True, iconvert=1, 
ss=0.000001, sy=0.15 + ) + # wel packages + period_one = ModflowGwfwel.stress_period_data.empty( + model, + maxbound=3, + aux_vars=["var1", "var2", "var3"], + boundnames=True, + timeseries=True, + ) + period_one[0][0] = ((0, 9, 2), -50.0, -1, -2, -3, None) + period_one[0][1] = ((1, 4, 7), -100.0, 1, 2, 3, "well_1") + period_one[0][2] = ((1, 3, 2), -20.0, 4, 5, 6, "well_2") + period_two = ModflowGwfwel.stress_period_data.empty( + model, + maxbound=2, + aux_vars=["var1", "var2", "var3"], + boundnames=True, + timeseries=True, + ) + period_two[0][0] = ((2, 3, 2), -80.0, 1, 2, 3, "well_2") + period_two[0][1] = ((2, 4, 7), -10.0, 4, 5, 6, "well_1") + stress_period_data = {} + stress_period_data[0] = period_one[0] + stress_period_data[1] = period_two[0] + wel_package = ModflowGwfwel( + model, + print_input=True, + print_flows=True, + auxiliary=[("var1", "var2", "var3")], + maxbound=5, + stress_period_data=stress_period_data, + boundnames=True, + save_flows=True, + ) + # rch package + rch_period_list = [] + for row in range(0, 10): + for col in range(0, 10): + rch_amt = (1 + row / 10) * (1 + col / 10) + rch_period_list.append(((0, row, col), rch_amt, 0.5)) + rch_period = {} + rch_period[0] = rch_period_list + rch_package = ModflowGwfrch( + model, + fixed_cell=True, + auxiliary="MULTIPLIER", + auxmultname="MULTIPLIER", + print_input=True, + print_flows=True, + save_flows=True, + maxbound=54, + stress_period_data=rch_period, + ) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # test get_record, set_record for list data + wel = model.get_package("wel") + spd_record = wel.stress_period_data.get_record() + well_sp_1 = spd_record[0] + assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt" + assert well_sp_1["binary"] is False + assert well_sp_1["data"][0][0] == (0, 9, 2) + assert well_sp_1["data"][0][1] == -50.0 + # modify + del well_sp_1["filename"] + well_sp_1["data"][0][0] = (1, 9, 2) + well_sp_2 = spd_record[1] + del well_sp_2["filename"] + well_sp_2["data"][0][0] = (1, 1, 1) + # save + spd_record[0] = well_sp_1 + spd_record[1] = well_sp_2 + wel.stress_period_data.set_record(spd_record) + # verify changes + spd_record = wel.stress_period_data.get_record() + well_sp_1 = spd_record[0] + assert "filename" not in well_sp_1 + assert well_sp_1["data"][0][0] == (1, 9, 2) + assert well_sp_1["data"][0][1] == -50.0 + well_sp_2 = spd_record[1] + assert "filename" not in well_sp_2 + assert well_sp_2["data"][0][0] == (1, 1, 1) + spd = wel.stress_period_data.get_data() + assert spd[0][0][0] == (1, 9, 2) + # change well_sp_2 back to external + well_sp_2["filename"] = "wel_spd_data_2.txt" + spd_record[1] = well_sp_2 + wel.stress_period_data.set_record(spd_record) + # change well_sp_2 data + spd[1][0][0] = (1, 2, 2) + wel.stress_period_data.set_data(spd) + # verify changes + spd_record = wel.stress_period_data.get_record() + well_sp_2 = spd_record[1] + assert well_sp_2["filename"] == "wel_spd_data_2.txt" + assert well_sp_2["data"][0][0] == (1, 2, 2) + + # test get_data/set_data vs get_record/set_record + dis = model.get_package("dis") + botm = dis.botm.get_record() + assert len(botm) == 3 + layer_2 = botm[1] + layer_3 = botm[2] + # verify layer 2 + assert layer_2["filename"] == "testrecordmodel.dis_botm_layer2.txt" + assert layer_2["binary"] is False + assert layer_2["factor"] == 1.0 + assert layer_2["iprn"] is None + assert layer_2["data"][0][0] == 10.0 + # change and set layer 2 + layer_2["filename"] = "botm_layer2.txt" + layer_2["binary"] = 
True + layer_2["iprn"] = 3 + layer_2["factor"] = 2.0 + layer_2["data"] = layer_2["data"] * 0.5 + botm[1] = layer_2 + # change and set layer 3 + del layer_3["filename"] + layer_3["factor"] = 0.5 + layer_3["data"] = layer_3["data"] * 2.0 + botm[2] = layer_3 + dis.botm.set_record(botm) + + # get botm in two different ways, verifying changes made + botm_record = dis.botm.get_record() + layer_1 = botm_record[0] + assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" + assert layer_1["binary"] is False + assert layer_1["iprn"] is None + assert layer_1["data"][0][0] == 50.0 + layer_2 = botm_record[1] + assert layer_2["filename"] == "botm_layer2.txt" + assert layer_2["binary"] is True + assert layer_2["factor"] == 2.0 + assert layer_2["iprn"] == 3 + assert layer_2["data"][0][0] == 5.0 + layer_3 = botm_record[2] + assert "filename" not in layer_3 + assert layer_3["factor"] == 0.5 + assert layer_3["data"][0][0] == -100.0 + botm_data = dis.botm.get_data(apply_mult=True) + assert botm_data[0][0][0] == 50.0 + assert botm_data[1][0][0] == 10.0 + assert botm_data[2][0][0] == -50.0 + botm_data = dis.botm.get_data() + assert botm_data[0][0][0] == 50.0 + assert botm_data[1][0][0] == 5.0 + assert botm_data[2][0][0] == -100.0 + # modify and set botm data with set_data + botm_data[0][0][0] = 6.0 + botm_data[1][0][0] = -8.0 + botm_data[2][0][0] = -205.0 + dis.botm.set_data(botm_data) + # verify that data changed and metadata did not change + botm_record = dis.botm.get_record() + layer_1 = botm_record[0] + assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" + assert layer_1["binary"] is False + assert layer_1["iprn"] is None + assert layer_1["data"][0][0] == 6.0 + assert layer_1["data"][0][1] == 50.0 + layer_2 = botm_record[1] + assert layer_2["filename"] == "botm_layer2.txt" + assert layer_2["binary"] is True + assert layer_2["factor"] == 2.0 + assert layer_2["iprn"] == 3 + assert layer_2["data"][0][0] == -8.0 + assert layer_2["data"][0][1] == 5.0 + layer_3 = botm_record[2] + assert "filename" not in layer_3 + assert layer_3["factor"] == 0.5 + assert layer_3["data"][0][0] == -205.0 + botm_data = dis.botm.get_data() + assert botm_data[0][0][0] == 6.0 + assert botm_data[1][0][0] == -8.0 + assert botm_data[2][0][0] == -205.0 + + spd_record = rch_package.stress_period_data.get_record() + assert 0 in spd_record + assert isinstance(spd_record[0], dict) + assert "filename" in spd_record[0] + assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" + assert "binary" in spd_record[0] + assert spd_record[0]["binary"] is False + assert "data" in spd_record[0] + assert spd_record[0]["data"][0][0] == (0, 0, 0) + spd_record[0]["data"][0][0] = (0, 0, 8) + rch_package.stress_period_data.set_record(spd_record) + + spd_data = rch_package.stress_period_data.get_data() + assert spd_data[0][0][0] == (0, 0, 8) + spd_data[0][0][0] = (0, 0, 7) + rch_package.stress_period_data.set_data(spd_data) + + spd_record = rch_package.stress_period_data.get_record() + assert isinstance(spd_record[0], dict) + assert "filename" in spd_record[0] + assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" + assert "binary" in spd_record[0] + assert spd_record[0]["binary"] is False + assert "data" in spd_record[0] + assert spd_record[0]["data"][0][0] == (0, 0, 7) + + sim.write_simulation() + + +@requires_exe("mf6") +def test_output(function_tmpdir, example_data_path): + ex_name = "test001e_UZF_3lay" + sim_ws = example_data_path / "mf6" / ex_name + sim = 
MFSimulation.load(sim_ws=sim_ws, exe_name="mf6")
+    sim.set_sim_path(str(function_tmpdir))
+    sim.write_simulation()
+    success, buff = sim.run_simulation()
+    assert success, f"simulation {sim.name} did not run"
+
+    ml = sim.get_model("gwf_1")
+
+    bud = ml.oc.output.budget()
+    budcsv = ml.oc.output.budgetcsv()
+    assert budcsv.file.closed
+    hds = ml.oc.output.head()
+    lst = ml.oc.output.list()
+
+    idomain = np.ones(ml.modelgrid.shape, dtype=int)
+    zonbud = ml.oc.output.zonebudget(idomain)
+
+    assert isinstance(bud, CellBudgetFile)
+    assert isinstance(budcsv, CsvFile)
+    assert isinstance(hds, HeadFile)
+    assert isinstance(zonbud, ZoneBudget6)
+    assert isinstance(lst, Mf6ListBudget)
+
+    bud = ml.output.budget()
+    budcsv = ml.output.budgetcsv()
+    hds = ml.output.head()
+    zonbud = ml.output.zonebudget(idomain)
+    lst = ml.output.list()
+
+    assert isinstance(bud, CellBudgetFile)
+    assert isinstance(budcsv, CsvFile)
+    assert isinstance(hds, HeadFile)
+    assert isinstance(zonbud, ZoneBudget6)
+    assert isinstance(lst, Mf6ListBudget)
+
+    uzf = ml.uzf
+    uzf_bud = uzf.output.budget()
+    uzf_budcsv = uzf.output.budgetcsv()
+    conv = uzf.output.package_convergence()
+    uzf_obs = uzf.output.obs()
+    uzf_zonbud = uzf.output.zonebudget(idomain)
+
+    assert isinstance(uzf_bud, CellBudgetFile)
+    assert isinstance(uzf_budcsv, CsvFile)
+    if conv is not None:
+        assert isinstance(conv, CsvFile)
+    assert isinstance(uzf_obs, Mf6Obs)
+    assert isinstance(uzf_zonbud, ZoneBudget6)
+    assert ml.dis.output.methods() is None
+
+
+@requires_exe("mf6")
+@pytest.mark.slow
+def test_output_add_observation(function_tmpdir, example_data_path):
+    model_name = "lakeex2a"
+    sim_ws = str(example_data_path / "mf6" / "test045_lake2tr")
+    sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6")
+    gwf = sim.get_model(model_name)
+
+    # remove sfr_obs and add a new sfr obs
+    sfr = gwf.sfr
+    obs_file = f"{model_name}.sfr.obs"
+    csv_file = f"{obs_file}.csv"
+    obs_dict = {
+        csv_file: [
+            ("l08_stage", "stage", (8,)),
+            ("l09_stage", "stage", (9,)),
+            ("l14_stage", "stage", (14,)),
+            ("l15_stage", "stage", (15,)),
+        ]
+    }
+    gwf.sfr.obs.initialize(
+        filename=obs_file, digits=10, print_input=True, continuous=obs_dict
+    )
+
+    sim.set_sim_path(str(function_tmpdir))
+    sim.write_simulation()
+
+    success, buff = sim.run_simulation()
+    assert success, f"simulation {sim.name} did not run"
+
+    # check that .output finds the newly added OBS package
+    sfr_obs = gwf.sfr.output.obs()
+
+    assert isinstance(sfr_obs, Mf6Obs), (
+        "remove and add observation test (Mf6Output) failed"
+    )
+
+
+@requires_exe("mf6")
+def test_sfr_connections(function_tmpdir, example_data_path):
+    """MODFLOW only warns when reaches are unconnected; flopy, by contrast,
+    fails to load the model when reach 1 is unconnected but tolerates other
+    unconnected reaches.
+    """
+
+    data_path = example_data_path / "mf6" / "test666_sfrconnections"
+    sim_ws = function_tmpdir
+    for test in ["sfr0", "sfr1"]:
+        sim_name = "test_sfr"
+        model_name = "test_sfr"
+        tdis_name = f"{sim_name}.tdis"
+        sim = MFSimulation(
+            sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=sim_ws
+        )
+        tdis_rc = [(1.0, 1, 1.0)]
+        tdis = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
+        ims_package = ModflowIms(
+            sim,
+            pname="my_ims_file",
+            filename=f"{sim_name}.ims",
+            print_option="ALL",
+            complexity="SIMPLE",
+        )
+        model = ModflowGwf(
+            sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
+        )
+
+        dis = ModflowGwfdis(
+            model,
+            length_units="FEET",
+            nlay=1,
+            nrow=5,
+            ncol=5,
+            delr=5000.0,
+            delc=5000.0,
+            top=100.0,
+            
botm=-100.0,
+            filename=f"{model_name}.dis",
+        )
+        ic_package = ModflowGwfic(model, filename=f"{model_name}.ic")
+        npf_package = ModflowGwfnpf(
+            model,
+            pname="npf",
+            save_flows=True,
+            alternative_cell_averaging="logarithmic",
+            icelltype=1,
+            k=50.0,
+        )
+
+        cnfile = f"mf6_{test}_connection.txt"
+        pkfile = f"mf6_{test}_package.txt"
+
+        with open(data_path / pkfile, "r") as f:
+            nreaches = len(f.readlines())
+        sfr = ModflowGwfsfr(
+            model,
+            packagedata={"filename": str(data_path / pkfile)},
+            connectiondata={"filename": str(data_path / cnfile)},
+            nreaches=nreaches,
+            pname="sfr",
+            unit_conversion=86400,
+        )
+        sim.set_all_data_external()
+        sim.write_simulation()
+        success, buff = sim.run_simulation()
+        assert success, f"simulation {sim.name} did not run"
+
+        # reload simulation
+        sim2 = MFSimulation.load(sim_ws=sim_ws)
+        sim2.set_all_data_external()
+        sim2.write_simulation()
+        success, buff = sim2.run_simulation()
+        assert success, f"simulation {sim2.name} did not run after being reloaded"
+
+        # test sfr recarray data
+        model2 = sim2.get_model()
+        sfr2 = model2.get_package("sfr")
+        sfr_pd = sfr2.packagedata
+        rec_data = [
+            (0, 0, 0, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 1, 1.0, 0),
+            (1, 0, 1, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 2, 1.0, 0),
+        ]
+        rec_type = [
+            ("ifno", int),
+            ("layer", int),
+            ("row", int),
+            ("column", int),
+            ("rlen", float),
+            ("rwid", float),
+            ("rgrd", float),
+            ("rtp", float),
+            ("rbth", float),
+            ("rhk", float),
+            ("man", float),
+            ("nconn", int),
+            ("ustrf", float),
+            ("nvd", int),
+        ]
+        pkg_data = np.rec.array(rec_data, rec_type)
+        sfr_pd.set_record({"data": pkg_data})
+        data = sfr_pd.get_data()
+        assert data[0][1] == (0, 0, 0)
+
+
+@requires_exe("mf6")
+def test_array(function_tmpdir):
+    """Test stress period data handling: get_data behavior, empty data in a
+    period block vs. repeating data, the .array property, and aux values
+    (checking whether aux defined in a previous stress period carries over
+    or a value of zero is used).
+    """
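+    # Semantics exercised below: data specified for a stress period stays in
+    # effect until a later period block replaces it, so .array carries the
+    # last specified values forward, while .get_data(kper) returns None for
+    # periods that were never explicitly specified.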
+
+    sim_name = "test_array"
+    model_name = "test_array"
+    out_dir = function_tmpdir
+    tdis_name = f"{sim_name}.tdis"
+    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
+    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
+    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
+    ims_package = ModflowIms(
+        sim,
+        pname="my_ims_file",
+        filename=f"{sim_name}.ims",
+        print_option="ALL",
+        complexity="SIMPLE",
+        outer_dvclose=0.0001,
+        outer_maximum=50,
+        under_relaxation="NONE",
+        inner_maximum=30,
+        inner_dvclose=0.0001,
+        linear_acceleration="CG",
+        preconditioner_levels=7,
+        preconditioner_drop_tolerance=0.01,
+        number_orthogonalizations=2,
+    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
+
+    dis = ModflowGwfdis(
+        model,
+        length_units="FEET",
+        nlay=4,
+        nrow=2,
+        ncol=2,
+        delr=5000.0,
+        delc=5000.0,
+        top=100.0,
+        botm=[50.0, 0.0, -50.0, -100.0],
+        filename=f"{model_name} 1.dis",
+    )
+    ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
+    npf_package = ModflowGwfnpf(
+        model,
+        pname="npf_1",
+        save_flows=True,
+        alternative_cell_averaging="logarithmic",
+        icelltype=1,
+        k=50.0,
+    )
+
+    oc_package = ModflowGwfoc(
+        model,
+        budget_filerecord=[("test_array.cbc",)],
+        head_filerecord=[("test_array.hds",)],
+        saverecord={
+            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
+            1: [],
+        },
+        printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+    )
+
+    aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]}
+    irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]}
+    rcha = ModflowGwfrcha(
+        model,
+        print_input=True,
+        print_flows=True,
+        auxiliary=[("var1", "var2")],
+        irch=irch,
+        recharge={1: 0.0001, 2: 0.00001},
+        aux=aux,
+    )
+    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
+    assert val_irch[0] == 4
+    assert val_irch[1] == 5
+    assert val_irch[2] == 6
+    assert val_irch[3] == 6
+    val_irch_2 = rcha.irch.get_data()
+    assert val_irch_2[0] is None
+    assert val_irch_2[1][1, 1] == 1
+    assert val_irch_2[2][1, 1] == 3
+    assert val_irch_2[3] is None
+    val_irch_2_3 = rcha.irch.get_data(3)
+    assert val_irch_2_3 is None
+    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
+    assert val_rch[0] == 0.0
+    assert val_rch[1] == 0.0004
+    assert val_rch[2] == 0.00004
+    assert val_rch[3] == 0.00004
+    val_rch_2 = rcha.recharge.get_data()
+    assert val_rch_2[0] is None
+    assert val_rch_2[1][0, 0] == 0.0001
+    assert val_rch_2[2][0, 0] == 0.00001
+    assert val_rch_2[3] is None
+    aux_data_0 = rcha.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = rcha.aux.get_data(1)
+    assert aux_data_1[0][0][0] == 50.0
+    aux_data_2 = rcha.aux.get_data(2)
+    assert aux_data_2 is None
+    aux_data_3 = rcha.aux.get_data(3)
+    assert aux_data_3[0][0][0] == 200.0
+
+    welspdict = {1: [[(0, 0, 0), 0.25, 0.0]], 2: [[(0, 0, 0), 0.1, 0.0]]}
+    wel = ModflowGwfwel(
+        model,
+        print_input=True,
+        print_flows=True,
+        mover=True,
+        stress_period_data=welspdict,
+        save_flows=False,
+        auxiliary="CONCENTRATION",
+        pname="WEL-1",
+    )
+    wel_array = wel.stress_period_data.array
+    assert wel_array[0] is None
+    assert wel_array[1][0][1] == 0.25
+    assert wel_array[2][0][1] == 0.1
+    assert wel_array[3][0][1] == 0.1
+
+    drnspdict = {
+        0: [[(0, 0, 0), 60.0, 10.0]],
+        2: [],
+        3: [[(0, 0, 0), 55.0, 5.0]],
+    }
+    drn = ModflowGwfdrn(
+        model,
+        print_input=True,
+        print_flows=True,
+        stress_period_data=drnspdict,
+        save_flows=False,
+        pname="DRN-1",
+    )
+    drn_array = 
drn.stress_period_data.array + assert drn_array[0][0][1] == 60.0 + assert drn_array[1][0][1] == 60.0 + assert drn_array[2] is None + assert drn_array[3][0][1] == 55.0 + drn_gd_0 = drn.stress_period_data.get_data(0) + assert drn_gd_0[0][1] == 60.0 + drn_gd_1 = drn.stress_period_data.get_data(1) + assert drn_gd_1 is None + drn_gd_2 = drn.stress_period_data.get_data(2) + assert len(drn_gd_2) == 0 + drn_gd_3 = drn.stress_period_data.get_data(3) + assert drn_gd_3[0][1] == 55.0 + + ghbspdict = { + 0: [[(0, 1, 1), 60.0, 10.0]], + } + ghb = ModflowGwfghb( + model, + print_input=True, + print_flows=True, + stress_period_data=ghbspdict, + save_flows=False, + pname="GHB-1", + ) + + lakpd = [(0, 70.0, 1), (1, 65.0, 1)] + lakecn = [ + (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + ] + lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")] + lak = ModflowGwflak( + model, + pname="lak", + print_input=True, + mover=True, + nlakes=2, + noutlets=0, + ntables=1, + packagedata=lakpd, + connectiondata=lakecn, + tables=lak_tables, + ) + + table_01 = [ + (30.0, 100000.0, 10000.0), + (40.0, 200500.0, 10100.0), + (50.0, 301200.0, 10130.0), + (60.0, 402000.0, 10180.0), + (70.0, 503000.0, 10200.0), + (80.0, 700000.0, 20000.0), + ] + lak_tab = ModflowUtllaktab( + model, + filename="lak01.tab", + nrow=6, + ncol=3, + table=table_01, + ) + + table_02 = [ + (40.0, 100000.0, 10000.0), + (50.0, 200500.0, 10100.0), + (60.0, 301200.0, 10130.0), + (70.0, 402000.0, 10180.0), + (80.0, 503000.0, 10200.0), + (90.0, 700000.0, 20000.0), + ] + lak_tab_2 = ModflowUtllaktab( + model, + filename="lak02.tab", + nrow=6, + ncol=3, + table=table_02, + ) + wel_name_1 = wel.name[0] + lak_name_2 = lak.name[0] + package_data = [(wel_name_1,), (lak_name_2,)] + period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)] + fname = f"{model.name}.input.mvr" + mvr = ModflowGwfmvr( + parent_model_or_package=model, + filename=fname, + print_input=True, + print_flows=True, + maxpackages=2, + maxmvr=1, + packages=package_data, + perioddata=period_data, + ) + + # test writing and loading model + sim.write_simulation() + sim.run_simulation() + + test_sim = MFSimulation.load( + sim_name, + "mf6", + "mf6", + out_dir, + write_headers=False, + ) + model = test_sim.get_model() + dis = model.get_package("dis") + rcha = model.get_package("rcha") + wel = model.get_package("wel") + drn = model.get_package("drn") + lak = model.get_package("lak") + lak_tab = model.get_package("laktab") + assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis" + # do same tests as above + val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) + assert val_irch[0] == 4 + assert val_irch[1] == 5 + assert val_irch[2] == 6 + assert val_irch[3] == 6 + val_irch_2 = rcha.irch.get_data() + assert val_irch_2[0] is None + assert val_irch_2[1][1, 1] == 1 + assert val_irch_2[2][1, 1] == 3 + assert val_irch_2[3] is None + val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) + assert val_rch[0] == 0.0 + assert val_rch[1] == 0.0004 + assert val_rch[2] == 0.00004 + assert val_rch[3] == 0.00004 + val_rch_2 = rcha.recharge.get_data() + assert val_rch_2[0] is None + assert val_rch_2[1][0, 0] == 0.0001 + assert val_rch_2[2][0, 0] == 0.00001 + assert val_rch_2[3] is None + aux_data_0 = rcha.aux.get_data(0) + assert aux_data_0 is None + aux_data_1 = rcha.aux.get_data(1) + assert aux_data_1[0][0][0] == 50.0 + aux_data_2 = rcha.aux.get_data(2) + assert aux_data_2 is None + aux_data_3 = rcha.aux.get_data(3) + assert aux_data_3[0][0][0] 
== 200.0
+
+    wel_array = wel.stress_period_data.array
+    assert wel_array[0] is None
+    assert wel_array[1][0][1] == 0.25
+    assert wel_array[2][0][1] == 0.1
+    assert wel_array[3][0][1] == 0.1
+
+    drn_array = drn.stress_period_data.array
+    assert drn_array[0][0][1] == 60.0
+    assert drn_array[1][0][1] == 60.0
+    assert drn_array[2] is None
+    assert drn_array[3][0][1] == 55.0
+    drn_gd_0 = drn.stress_period_data.get_data(0)
+    assert drn_gd_0[0][1] == 60.0
+    drn_gd_1 = drn.stress_period_data.get_data(1)
+    assert drn_gd_1 is None
+    drn_gd_2 = drn.stress_period_data.get_data(2)
+    assert len(drn_gd_2) == 0
+    drn_gd_3 = drn.stress_period_data.get_data(3)
+    assert drn_gd_3[0][1] == 55.0
+
+    lak_tab_array = lak.tables.get_data()
+    assert lak_tab_array[0][1] == "lak01.tab"
+    assert lak_tab_array[1][1] == "lak02.tab"
+
+    assert len(lak_tab) == 2
+    lak_tab_1 = lak_tab[0].table.get_data()
+    assert lak_tab_1[0][0] == 30.0
+    assert lak_tab_1[5][2] == 20000.0
+    lak_tab_2 = lak_tab[1].table.get_data()
+    assert lak_tab_2[0][0] == 40.0
+    assert lak_tab_2[4][1] == 503000.0
+
+
+@requires_exe("mf6")
+def test_grid_array(function_tmpdir):
+    """Grid-based variant of test_array: get_data behavior, empty data in a
+    period block vs. repeating data, the .array property, and aux values
+    for array-based packages.
+    """
+
+    sim_name = "test_grid_array"
+    model_name = "test_grid_array"
+    out_dir = function_tmpdir
+    tdis_name = f"{sim_name}.tdis"
+    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
+    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
+    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
+    ims_package = ModflowIms(
+        sim,
+        pname="my_ims_file",
+        filename=f"{sim_name}.ims",
+        print_option="ALL",
+        complexity="SIMPLE",
+        outer_dvclose=0.0001,
+        outer_maximum=50,
+        under_relaxation="NONE",
+        inner_maximum=30,
+        inner_dvclose=0.0001,
+        linear_acceleration="CG",
+        preconditioner_levels=7,
+        preconditioner_drop_tolerance=0.01,
+        number_orthogonalizations=2,
+    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
+
+    dis = ModflowGwfdis(
+        model,
+        length_units="FEET",
+        nlay=4,
+        nrow=2,
+        ncol=2,
+        delr=5000.0,
+        delc=5000.0,
+        top=100.0,
+        botm=[50.0, 0.0, -50.0, -100.0],
+        filename=f"{model_name} 1.dis",
+    )
+    ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
+    npf_package = ModflowGwfnpf(
+        model,
+        pname="npf_1",
+        save_flows=True,
+        alternative_cell_averaging="logarithmic",
+        icelltype=1,
+        k=50.0,
+    )
+
+    oc_package = ModflowGwfoc(
+        model,
+        budget_filerecord=[("test_array.cbc",)],
+        head_filerecord=[("test_array.hds",)],
+        saverecord={
+            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
+            1: [],
+        },
+        printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+    )
+
+    aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]}
+    irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]}
+    rcha = ModflowGwfrcha(
+        model,
+        print_input=True,
+        print_flows=True,
+        auxiliary=[("var1", "var2")],
+        irch=irch,
+        recharge={1: 0.0001, 2: 0.00001},
+        aux=aux,
+    )
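+    # For array-based aux variables, get_data(kper) returns one array per
+    # auxiliary variable in the order declared ("var1" first, then "var2"),
+    # so aux_data_1[0] in the checks below is var1 for stress period 1.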
+    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
+    assert val_irch[0] == 4
+    assert val_irch[1] == 5
+    assert val_irch[2] == 6
+    assert val_irch[3] == 6
+    val_irch_2 = rcha.irch.get_data()
+    assert val_irch_2[0] is None
+    assert val_irch_2[1][1, 1] == 1
+    assert val_irch_2[2][1, 1] == 3
+    assert val_irch_2[3] is None
+    val_irch_2_3 = rcha.irch.get_data(3)
+    assert val_irch_2_3 is None
+    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
+    assert val_rch[0] == 0.0
+    assert val_rch[1] == 0.0004
+    assert val_rch[2] == 0.00004
+    assert val_rch[3] == 0.00004
+    val_rch_2 = rcha.recharge.get_data()
+    assert val_rch_2[0] is None
+    assert val_rch_2[1][0, 0] == 0.0001
+    assert val_rch_2[2][0, 0] == 0.00001
+    assert val_rch_2[3] is None
+    aux_data_0 = rcha.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = rcha.aux.get_data(1)
+    assert aux_data_1[0][0][0] == 50.0
+    aux_data_2 = rcha.aux.get_data(2)
+    assert aux_data_2 is None
+    aux_data_3 = rcha.aux.get_data(3)
+    assert aux_data_3[0][0][0] == 200.0
+
+    nlay = dis.nlay.get_data()
+    nrow = dis.nrow.get_data()
+    ncol = dis.ncol.get_data()
+
+    DNODATA = 3.0e30  # MF6 DNODATA constant
+    welqspd = {}
+    welconcspd = {}
+    for n in range(4):
+        q = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        if n == 1:
+            q[0, 0, 0] = 0.25
+            welconc[0, 0, 0] = 0.0
+            welaux2[0, 0, 0] = 9.0
+        elif n == 2:
+            q[0, 0, 0] = 0.1
+            welconc[0, 0, 0] = 0.0
+            welaux2[0, 0, 0] = 9.0
+        welqspd[n] = q
+        welconcspd[n] = [welconc, welaux2]
+
+    # first create test package with multiple auxvars
+    wel = ModflowGwfwelg(
+        model,
+        print_input=True,
+        print_flows=True,
+        mover=True,
+        save_flows=False,
+        auxiliary=["var1", "var2"],
+        pname="WEL-1",
+        q=welqspd,
+        aux=welconcspd,
+    )
+
+    # .array and get_data(kper) must agree for every period and aux variable
+    assert np.allclose(wel.aux.array[0][0], wel.aux.get_data(0)[0])
+    assert np.allclose(wel.aux.array[0][1], wel.aux.get_data(0)[1])
+    assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0])
+    assert np.allclose(wel.aux.array[1][1], wel.aux.get_data(1)[1])
+    assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0])
+    assert np.allclose(wel.aux.array[2][1], wel.aux.get_data(2)[1])
+    assert np.allclose(wel.aux.array[3][0], wel.aux.get_data(3)[0])
+    assert np.allclose(wel.aux.array[3][1], wel.aux.get_data(3)[1])
+
+    assert wel.q.get_data()[0] is None
+    assert wel.q.get_data(0) is None
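+    # DNODATA marks cells with no well in a given period; the masked sums
+    # below convert DNODATA to NaN so np.nansum totals only specified rates.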
+    assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1))
+    assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2))
+    assert len(wel.q.array) == 4
+    assert np.allclose(wel.q.array[1], wel.q.get_data(1))
+    assert np.allclose(wel.q.array[2], wel.q.get_data(2))
+    assert wel.q.get_data()[3] is None
+    assert wel.q.get_data(3) is None
+
+    sim.write_simulation()
+    assert not wel.has_stress_period_data
+    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
+    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
+    assert val_q[0] == 0.0
+    assert val_q[1] == 0.25
+    assert val_q[2] == 0.1
+    assert val_q[3] == 0.1
+    val_q_2 = wel.q.get_data()
+    assert val_q_2[0] is None
+    assert val_q_2[1][0, 0, 0] == 0.25
+    assert val_q_2[2][0, 0, 0] == 0.1
+    assert val_q_2[3] is None
+    # periods 0 and 3 hold only the DNODATA fill value for aux
+    aux_data_0 = wel.aux.get_data(0)
+    assert aux_data_0[0][0, 0, 0] == DNODATA
+    aux_data_1 = wel.aux.get_data(1)
+    assert aux_data_1[0][0, 0, 0] == 0.0
+    assert aux_data_1[1][0, 0, 0] == 9.0
+    aux_data_2 = wel.aux.get_data(2)
+    assert aux_data_2[0][0, 0, 0] == 0.0
+    assert aux_data_2[1][0, 0, 0] == 9.0
+    aux_data_3 = wel.aux.get_data(3)
+    assert aux_data_3[0][0, 0, 0] == DNODATA
+
+    # remove test wel package
+    wel.remove()
+
+    # recreate the welg package with a single CONCENTRATION auxvar,
+    # mirroring the list-based WEL-1 setup in test_array
+    wel = ModflowGwfwelg(
+        model,
+        print_input=True,
+        print_flows=True,
+        mover=True,
+        save_flows=False,
+        auxiliary=["CONCENTRATION"],
+        pname="WEL-1",
+        q=welqspd,
+        aux=welconcspd,
+    )
+
+    assert not wel.has_stress_period_data
+    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
+    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
+    assert val_q[0] == 0.0
+    assert val_q[1] == 0.25
+    assert val_q[2] == 0.1
+    assert val_q[3] == 0.1
+    val_q_2 = wel.q.get_data()
+    assert val_q_2[0] is None
+    assert val_q_2[1][0, 0, 0] == 0.25
+    assert val_q_2[2][0, 0, 0] == 0.1
+    assert val_q_2[3] is None
+    aux_data_0 = wel.aux.get_data(0)
+    assert aux_data_0[0][0, 0, 0] == DNODATA
+    aux_data_1 = wel.aux.get_data(1)
+    assert aux_data_1[0][0, 0, 0] == 0.0
+    aux_data_2 = wel.aux.get_data(2)
+    assert aux_data_2[0][0, 0, 0] == 0.0
+    aux_data_3 = wel.aux.get_data(3)
+    assert aux_data_3[0][0, 0, 0] == DNODATA
+
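+    # An explicitly empty period block ({2: []}) differs from an unspecified
+    # period: get_data(2) returns an empty list, get_data(1) returns None,
+    # and .array repeats the period 0 values through period 1.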
+    drnspdict = {
+        0: [[(0, 0, 0), 60.0, 10.0]],
+        2: [],
+        3: [[(0, 0, 0), 55.0, 5.0]],
+    }
+    drn = ModflowGwfdrn(
+        model,
+        print_input=True,
+        print_flows=True,
+        stress_period_data=drnspdict,
+        save_flows=False,
+        pname="DRN-1",
+    )
+    drn_array = drn.stress_period_data.array
+    assert drn_array[0][0][1] == 60.0
+    assert drn_array[1][0][1] == 60.0
+    assert drn_array[2] is None
+    assert drn_array[3][0][1] == 55.0
+    drn_gd_0 = drn.stress_period_data.get_data(0)
+    assert drn_gd_0[0][1] == 60.0
+    drn_gd_1 = drn.stress_period_data.get_data(1)
+    assert drn_gd_1 is None
+    drn_gd_2 = drn.stress_period_data.get_data(2)
+    assert len(drn_gd_2) == 0
+    drn_gd_3 = drn.stress_period_data.get_data(3)
+    assert drn_gd_3[0][1] == 55.0
+
+    ghbspdict = {
+        0: [[(0, 1, 1), 60.0, 10.0]],
+    }
+    ghb = ModflowGwfghb(
+        model,
+        print_input=True,
+        print_flows=True,
+        stress_period_data=ghbspdict,
+        save_flows=False,
+        pname="GHB-1",
+    )
+
+    lakpd = [(0, 70.0, 1), (1, 65.0, 1)]
+    lakecn = [
+        (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
+        (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
+    ]
+    lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")]
+    lak = ModflowGwflak(
+        model,
+        pname="lak",
+        print_input=True,
+        mover=True,
+        nlakes=2,
+        noutlets=0,
+        ntables=1,
+        packagedata=lakpd,
+        connectiondata=lakecn,
+        tables=lak_tables,
+    )
+
+    table_01 = [
+        (30.0, 100000.0, 10000.0),
+        (40.0, 200500.0, 10100.0),
+        (50.0, 301200.0, 10130.0),
+        (60.0, 402000.0, 10180.0),
+        (70.0, 503000.0, 10200.0),
+        (80.0, 700000.0, 20000.0),
+    ]
+    lak_tab = ModflowUtllaktab(
+        model,
+        filename="lak01.tab",
+        nrow=6,
+        ncol=3,
+        table=table_01,
+    )
+
+    table_02 = [
+        (40.0, 100000.0, 10000.0),
+        (50.0, 200500.0, 10100.0),
+        (60.0, 301200.0, 10130.0),
+        (70.0, 402000.0, 10180.0),
+        (80.0, 503000.0, 10200.0),
+        (90.0, 700000.0, 20000.0),
+    ]
+    lak_tab_2 = ModflowUtllaktab(
+        model,
+        filename="lak02.tab",
+        nrow=6,
+        ncol=3,
+        table=table_02,
+    )
+    wel_name_1 = wel.name[0]
+    lak_name_2 = lak.name[0]
+    package_data = [(wel_name_1,), (lak_name_2,)]
+    period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)]
+    fname = f"{model.name}.input.mvr"
+    mvr = ModflowGwfmvr(
+        parent_model_or_package=model,
+        filename=fname,
+        print_input=True,
+        print_flows=True,
+        maxpackages=2,
+        maxmvr=1,
+        packages=package_data,
+        perioddata=period_data,
+    )
+
+    # test writing and loading model
+    sim.write_simulation()
+    sim.run_simulation()
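+    # reload the simulation from disk and repeat the checks above to verify
+    # that array and grid-based period data survive a write/run/load round trip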
+    test_sim = MFSimulation.load(
+        sim_name,
+        "mf6",
+        "mf6",
+        out_dir,
+        write_headers=False,
+    )
+    model = test_sim.get_model()
+    dis = model.get_package("dis")
+    rcha = model.get_package("rcha")
+    wel = model.get_package("wel")
+    drn = model.get_package("drn")
+    lak = model.get_package("lak")
+    lak_tab = model.get_package("laktab")
+    assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis"
+    # do same tests as above
+    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
+    assert val_irch[0] == 4
+    assert val_irch[1] == 5
+    assert val_irch[2] == 6
+    assert val_irch[3] == 6
+    val_irch_2 = rcha.irch.get_data()
+    assert val_irch_2[0] is None
+    assert val_irch_2[1][1, 1] == 1
+    assert val_irch_2[2][1, 1] == 3
+    assert val_irch_2[3] is None
+    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
+    assert val_rch[0] == 0.0
+    assert val_rch[1] == 0.0004
+    assert val_rch[2] == 0.00004
+    assert val_rch[3] == 0.00004
+    val_rch_2 = rcha.recharge.get_data()
+    assert val_rch_2[0] is None
+    assert val_rch_2[1][0, 0] == 0.0001
+    assert val_rch_2[2][0, 0] == 0.00001
+    assert val_rch_2[3] is None
+    aux_data_0 = rcha.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = rcha.aux.get_data(1)
+    assert aux_data_1[0][0][0] == 50.0
+    aux_data_2 = rcha.aux.get_data(2)
+    assert aux_data_2 is None
+    aux_data_3 = rcha.aux.get_data(3)
+    assert aux_data_3[0][0][0] == 200.0
+
+    welg_q_per = wel.q.get_data()
+    assert welg_q_per[0] is None
+    assert welg_q_per[1][0, 0, 0] == 0.25
+    assert welg_q_per[2][0, 0, 0] == 0.1
+    assert welg_q_per[3][0, 0, 0] == 0.1
+
+    welg_q_per1 = wel.q.get_data(1)
+    assert welg_q_per1[0, 0, 0] == 0.25
+    welg_aux_per1 = wel.aux.get_data(1)
+    assert welg_aux_per1[0][0, 0, 0] == 0.0
+
+    drn_array = drn.stress_period_data.array
+    assert drn_array[0][0][1] == 60.0
+    assert drn_array[1][0][1] == 60.0
+    assert drn_array[2] is None
+    assert drn_array[3][0][1] == 55.0
+    drn_gd_0 = drn.stress_period_data.get_data(0)
+    assert drn_gd_0[0][1] == 60.0
+    drn_gd_1 = drn.stress_period_data.get_data(1)
+    assert drn_gd_1 is None
+    drn_gd_2 = drn.stress_period_data.get_data(2)
+    assert len(drn_gd_2) == 0
+    drn_gd_3 = drn.stress_period_data.get_data(3)
+    assert drn_gd_3[0][1] == 55.0
+
+    lak_tab_array = lak.tables.get_data()
+    assert lak_tab_array[0][1] == "lak01.tab"
+    assert lak_tab_array[1][1] == "lak02.tab"
+
+    assert len(lak_tab) == 2
+    lak_tab_1 = lak_tab[0].table.get_data()
+    assert lak_tab_1[0][0] == 30.0
+    assert lak_tab_1[5][2] == 20000.0
+    lak_tab_2 = lak_tab[1].table.get_data()
+    assert lak_tab_2[0][0] == 40.0
+    assert lak_tab_2[4][1] == 503000.0
+
+
+@requires_exe("mf6")
+def test_multi_model(function_tmpdir):
+    # init paths
+    test_ex_name = "test_multi_model"
+    model_names = ["gwf_model_1", "gwf_model_2", "gwt_model_1", "gwt_model_2"]
+
+    # temporal discretization
+    nper = 1
+    perlen = [5.0]
+    nstp = [200]
+    tsmult = [1.0]
+    tdis_rc = []
+    for i in range(nper):
+        tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
+
+    # build MODFLOW 6 files
+    sim = MFSimulation(
+        sim_name=test_ex_name,
+        version="mf6",
+        exe_name="mf6",
+        sim_ws=str(function_tmpdir),
+    )
+    # create tdis package
+    tdis = ModflowTdis(
+        sim, time_units="DAYS", nper=nper, perioddata=tdis_rc, pname="sim.tdis"
+    )
+
+    # grid information
+    nlay, nrow, ncol = 1, 1, 50
+
+    # Create gwf1 model
+    welspd = {0: [[(0, 0, 0), 1.0, 1.0]]}
+    chdspd = None
+    gwf1 = get_gwf_model(
+        sim,
+        model_names[0],
+        model_names[0],
+        (nlay, nrow, ncol),
+        chdspd=chdspd,
+        welspd=welspd,
+    )
+
+    # Create gwf2 model
+    welspd = {0: [[(0, 0, 1), 0.5, 0.5]]}
+    chdspd = {0: [[(0, 0, ncol - 1), 0.0000000]]}
+    gwf2 = get_gwf_model(
+        sim,
+        model_names[1],
+        model_names[1],
+        (nlay, nrow, ncol),
+        chdspd=chdspd,
+        welspd=welspd,
+    )
+    lakpd = [(0, -100.0, 1)]
+    lakecn = [(0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 0.1, 1.0, 10.0, 1.0)]
+    lak_2 = ModflowGwflak(
+        gwf2,
+        pname="lak2",
+        print_input=True,
+        mover=True,
+        nlakes=1,
+        noutlets=0,
+        ntables=0,
+        packagedata=lakpd,
+        connectiondata=lakecn,
+    )
+
+    # gwf-gwf
+    gwfgwf_data = []
+    for col in range(0, ncol):
+        gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0])
+    gwfgwf = ModflowGwfgwf(
+        sim,
+        exgtype="GWF6-GWF6",
+        nexg=len(gwfgwf_data),
+        exgmnamea=gwf1.name,
+        exgmnameb=gwf2.name,
+        exchangedata=gwfgwf_data,
+        auxiliary=["ANGLDEGX", "CDIST"],
+        
filename="flow1_flow2.gwfgwf", + ) + # set up mvr package + wel_1 = gwf1.get_package("wel") + wel_1.mover.set_data(True) + wel_name_1 = wel_1.name[0] + lak_name_2 = lak_2.name[0] + package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)] + period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)] + fname = "gwfgwf.input.mvr" + gwfgwf.mvr.initialize( + filename=fname, + modelnames=True, + print_input=True, + print_flows=True, + maxpackages=2, + maxmvr=1, + packages=package_data, + perioddata=period_data, + ) + + gnc_data = [] + for col in range(0, ncol): + if col < ncol / 2.0: + gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col + 1), 0.25)) + else: + gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col - 1), 0.25)) + + # set up gnc package + fname = "gwfgwf.input.gnc" + gwfgwf.gnc.initialize( + filename=fname, + print_input=True, + print_flows=True, + numgnc=ncol, + numalphaj=1, + gncdata=gnc_data, + ) + + # Observe flow for exchange + gwfgwfobs = {} + obs_list = [] + for col in range(0, ncol): + obs_list.append([f"exchange_flow_{col}", "FLOW-JA-FACE", (col,)]) + gwfgwfobs["gwfgwf.output.obs.csv"] = obs_list + fname = "gwfgwf.input.obs" + gwfgwf.obs.initialize( + filename=fname, digits=25, print_input=True, continuous=gwfgwfobs + ) + + # Create gwt model + sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")] + gwt = get_gwt_model( + sim, + model_names[2], + model_names[2], + (nlay, nrow, ncol), + sourcerecarray=sourcerecarray, + ) + + # GWF GWT exchange + gwfgwt = ModflowGwfgwt( + sim, + exgtype="GWF6-GWT6", + exgmnamea=model_names[0], + exgmnameb=model_names[2], + filename="flow1_transport1.gwfgwt", + ) + + # solver settings + nouter, ninner = 100, 300 + hclose, rclose, relax = 1e-6, 1e-6, 1.0 + + # create iterative model solution and register the gwf model with it + imsgwf = ModflowIms( + sim, + print_option="SUMMARY", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="NONE", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + filename="flow.ims", + ) + + # create iterative model solution and register the gwt model with it + imsgwt = ModflowIms( + sim, + print_option="SUMMARY", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="NONE", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + filename="transport.ims", + ) + sim.register_ims_package(imsgwt, [gwt.name]) + + sim.write_simulation() + sim.run_simulation() + + # reload simulation + sim2 = MFSimulation.load(sim_ws=str(function_tmpdir)) + + # check ims registration + solution_recarray = sim2.name_file.solutiongroup + for solution_group_num in solution_recarray.get_active_key_list(): + rec_array = solution_recarray.get_data(solution_group_num[0]) + assert rec_array[0][1] == "flow.ims" + assert rec_array[0][2] == model_names[0] + assert rec_array[0][3] == model_names[1] + assert rec_array[1][1] == "transport.ims" + assert rec_array[1][2] == model_names[2] + assert gwf1.get_ims_package() is gwf2.get_ims_package() + assert gwf1.get_ims_package().filename == "flow.ims" + assert gwt.get_ims_package().filename == "transport.ims" + # test ssm fileinput + gwt2 = sim2.get_model("gwt_model_1") + ssm2 = gwt2.get_package("ssm") + fileinput = [ + ("RCH-1", "gwt_model_1.rch1.spc"), + ("RCH-2", 
"gwt_model_1.rch2.spc"), + ("RCH-3", "gwt_model_1.rch3.spc", "MIXED"), + ("RCH-4", "gwt_model_1.rch4.spc"), + ] + ssm2.fileinput = fileinput + fi_out = ssm2.fileinput.get_data() + assert fi_out[2][1] == "gwt_model_1.rch3.spc" + assert fi_out[1][2] is None + assert fi_out[2][2] == "MIXED" + + spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True) + spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False) + spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True) + spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True) + + # test writing and loading spca packages + sim2.write_simulation() + sim3 = MFSimulation.load(sim_ws=sim2.sim_path) + gwt3 = sim3.get_model("gwt_model_1") + spc1 = gwt3.get_package("gwt_model_1.rch1.spc") + assert isinstance(spc1, ModflowUtlspca) + assert spc1.print_input.get_data() is True + spc2 = gwt3.get_package("gwt_model_1.rch2.spc") + assert spc2.print_input.get_data() is not True + + # create a new gwt model + sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")] + gwt_2 = get_gwt_model( + sim, + model_names[3], + model_names[3], + (nlay, nrow, ncol), + sourcerecarray=sourcerecarray, + ) + # register gwt model with transport.ims + sim.register_ims_package(imsgwt, gwt_2.name) + # flow and transport exchange + gwfgwt = ModflowGwfgwt( + sim, + exgtype="GWF6-GWT6", + exgmnamea=model_names[1], + exgmnameb=model_names[3], + filename="flow2_transport2.gwfgwt", + ) + # save and run updated model + sim.write_simulation() + sim.run_simulation() + + with pytest.raises( + flopy.mf6.mfbase.FlopyException, + match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.', + ): + # test kwargs error checking + wel = ModflowGwfwel( + gwf2, + print_input=True, + print_flows=True, + stress_period_data=welspd, + save_flows=False, + auxiliary="CONCENTRATION", + pname="WEL-1", + param_does_not_exist=True, + ) + + +@requires_exe("mf6") +def test_namefile_creation(function_tmpdir): + test_ex_name = "test_namefile" + # build MODFLOW 6 files + sim = MFSimulation( + sim_name=test_ex_name, + version="mf6", + exe_name="mf6", + sim_ws=str(function_tmpdir), + ) + + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] + tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) + ims_package = ModflowIms( + sim, + pname="my_ims_file", + filename=f"{test_ex_name}.ims", + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.0001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.0001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + model = ModflowGwf( + sim, + modelname=test_ex_name, + model_nam_file=f"{test_ex_name}.nam", + ) + + # try to create simulation name file + ex_happened = False + try: + nam = ModflowNam(sim) + except flopy.mf6.mfbase.FlopyException: + ex_happened = True + assert ex_happened + + # try to create model name file + ex_happened = False + try: + nam = ModflowGwfnam(model) + except flopy.mf6.mfbase.FlopyException: + ex_happened = True + assert ex_happened + + +def test_remove_model(function_tmpdir, example_data_path): + # load a multi-model simulation + sim_ws = str(example_data_path / "mf6" / "test006_2models_mvr") + sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") + + # original simulation should contain models: + # - 'parent', with files named 'model1.ext' + # - 'child', with files named 'model2.ext' + assert 
len(sim.model_names) == 2
+    assert "parent" in sim.model_names
+    assert "child" in sim.model_names
+
+    # remove the child model
+    sim.remove_model("child")
+
+    # simulation should now only contain the parent model
+    assert len(sim.model_names) == 1
+    assert "parent" in sim.model_names
+
+    # write simulation input files
+    sim.set_sim_path(function_tmpdir)
+    sim.write_simulation()
+
+    # there should be no input files for the child model
+    files = list(function_tmpdir.glob("*"))
+    assert not any("model2" in f.name for f in files)
+
+    # there should be no model or solver entry for the child model
+    # in the simulation namefile
+    lines = open(function_tmpdir / "mfsim.nam").readlines()
+    lines = [l.lower().strip() for l in lines]
+    assert not any("model2" in l for l in lines)
+    assert not any("child" in l for l in lines)
+
+    # there should be no exchanges either
+    exg_index = 0
+    for i, l in enumerate(lines):
+        if "begin exchanges" in l:
+            exg_index = i
+        elif exg_index > 0:
+            assert "end exchanges" in l
+            break
+
+
+@requires_pkg("shapely")
+@requires_exe("triangle")
+def test_flopy_2283(function_tmpdir):
+    # create triangular grid
+    triangle_ws = function_tmpdir / "triangle"
+    triangle_ws.mkdir()
+
+    active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)]
+    tri = Triangle(model_ws=triangle_ws, angle=30)
+    tri.add_polygon(active_area)
+    tri.add_region((1, 1), maximum_area=50**2)
+
+    tri.build()
+
+    # build vertex grid object
+    vgrid = flopy.discretization.VertexGrid(
+        vertices=tri.get_vertices(),
+        cell2d=tri.get_cell2d(),
+        xoff=199000,
+        yoff=215500,
+        crs=31370,
+        angrot=30,
+    )
+
+    # coord info is set (also correct when using vgrid.set_coord_info())
+    print(vgrid)
+
+    # create MODFLOW 6 model
+    ws = function_tmpdir / "model"
+    ws.mkdir()
+    sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws)
+    tdis = flopy.mf6.ModflowTdis(sim)
+    ims = flopy.mf6.ModflowIms(sim)
+
+    gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf")
+    disv = flopy.mf6.ModflowGwfdisv(
+        gwf,
+        xorigin=vgrid.xoffset,
+        yorigin=vgrid.yoffset,
+        angrot=vgrid.angrot,  # no CRS info can be set in DISV
+        nlay=1,
+        top=0.0,
+        botm=-10.0,
+        ncpl=vgrid.ncpl,
+        nvert=vgrid.nvert,
+        cell2d=vgrid.cell2d,
+        vertices=tri.get_vertices(),  # this is not stored in the Vertex grid object?
+ ) + + assert gwf.modelgrid.xoffset == disv.xorigin.get_data() + assert gwf.modelgrid.yoffset == disv.yorigin.get_data() + assert gwf.modelgrid.angrot == disv.angrot.get_data() diff --git a/autotest/tmp/test_mf6.py b/autotest/tmp/test_mf6.py new file mode 100644 index 0000000000..b723212803 --- /dev/null +++ b/autotest/tmp/test_mf6.py @@ -0,0 +1,2930 @@ +import os +import platform +from pathlib import Path +from shutil import copytree, which + +import numpy as np +import pytest +from modflow_devtools.markers import requires_exe, requires_pkg +from modflow_devtools.misc import set_dir + +import flopy +from flopy.mf6 import ( + MFModel, + MFSimulation, + ModflowGwf, + ModflowGwfchd, + ModflowGwfdis, + ModflowGwfdisu, + ModflowGwfdisv, + ModflowGwfdrn, + ModflowGwfevt, + ModflowGwfevta, + ModflowGwfghb, + ModflowGwfgnc, + ModflowGwfgwf, + ModflowGwfgwt, + ModflowGwfhfb, + ModflowGwfic, + ModflowGwflak, + ModflowGwfmaw, + ModflowGwfmvr, + ModflowGwfnam, + ModflowGwfnpf, + ModflowGwfoc, + ModflowGwfrch, + ModflowGwfrcha, + ModflowGwfriv, + ModflowGwfsfr, + ModflowGwfsto, + ModflowGwfuzf, + ModflowGwfwel, + ModflowGwfwelg, + ModflowGwtadv, + ModflowGwtdis, + ModflowGwtic, + ModflowGwtmst, + ModflowGwtoc, + ModflowGwtssm, + ModflowIms, + ModflowNam, + ModflowTdis, + ModflowUtllaktab, + ModflowUtlspca, +) +from flopy.mf6.coordinates.modeldimensions import ( + DataDimensions, + ModelDimensions, + PackageDimensions, +) +from flopy.mf6.data.mffileaccess import MFFileAccessArray +from flopy.mf6.data.mfstructure import MFDataItemStructure, MFDataStructure +from flopy.mf6.mfsimbase import MFSimulationData +from flopy.mf6.modflow import ( + mfgwf, + mfgwfdis, + mfgwfdrn, + mfgwfic, + mfgwfnpf, + mfgwfoc, + mfgwfriv, + mfgwfsto, + mfgwfwel, + mfims, + mftdis, +) +from flopy.utils import CellBudgetFile, HeadFile, Mf6ListBudget, Mf6Obs, ZoneBudget6 +from flopy.utils.observationfile import CsvFile +from flopy.utils.triangle import Triangle +from flopy.utils.voronoi import VoronoiGrid + +pytestmark = pytest.mark.mf6 + + +def write_head( + fbin, + data, + kstp=1, + kper=1, + pertim=1.0, + totim=1.0, + text=" HEAD", + ilay=1, +): + dt = np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f8"), + ("totim", "f8"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + nrow = data.shape[0] + ncol = data.shape[1] + h = np.array((kstp, kper, pertim, totim, text, ncol, nrow, ilay), dtype=dt) + h.tofile(fbin) + data.tofile(fbin) + + +def get_gwf_model(sim, gwfname, gwfpath, modelshape, chdspd=None, welspd=None): + nlay, nrow, ncol = modelshape + delr = 1.0 + delc = 1.0 + top = 1.0 + botm = [0.0] + strt = 1.0 + hk = 1.0 + laytyp = 0 + + gwf = ModflowGwf( + sim, + modelname=gwfname, + save_flows=True, + ) + gwf.set_model_relative_path(gwfpath) + + dis = ModflowGwfdis( + gwf, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + # initial conditions + ic = ModflowGwfic(gwf, strt=strt) + + # node property flow + npf = ModflowGwfnpf( + gwf, + icelltype=laytyp, + k=hk, + save_specific_discharge=True, + ) + + # chd files + if chdspd is not None: + chd = ModflowGwfchd( + gwf, + stress_period_data=chdspd, + save_flows=False, + pname="CHD-1", + ) + + # wel files + if welspd is not None: + wel = ModflowGwfwel( + gwf, + print_input=True, + print_flows=True, + stress_period_data=welspd, + save_flows=False, + auxiliary="CONCENTRATION", + pname="WEL-1", + ) + + # output control + oc = ModflowGwfoc( + gwf, + budget_filerecord=f"{gwfname}.cbc", + 
head_filerecord=f"{gwfname}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], + printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], + ) + return gwf + + +def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None): + nlay, nrow, ncol = modelshape + delr = 1.0 + delc = 1.0 + top = 1.0 + botm = [0.0] + strt = 1.0 + hk = 1.0 + laytyp = 0 + + gwt = MFModel( + sim, + model_type="gwt6", + modelname=gwtname, + model_rel_path=gwtpath, + ) + gwt.name_file.save_flows = True + + dis = ModflowGwtdis( + gwt, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + # initial conditions + ic = ModflowGwtic(gwt, strt=0.0) + + # advection + adv = ModflowGwtadv(gwt, scheme="upstream") + + # mass storage and transfer + mst = ModflowGwtmst(gwt, porosity=0.1) + + # sources + ssm = ModflowGwtssm(gwt, sources=sourcerecarray) + + # output control + oc = ModflowGwtoc( + gwt, + budget_filerecord=f"{gwtname}.cbc", + concentration_filerecord=f"{gwtname}.ucn", + concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], + printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], + ) + return gwt + + +def to_win_sep(s): + return s.replace("/", "\\") + + +def to_posix_sep(s): + return s.replace("\\", "/") + + +def to_os_sep(s): + return s.replace("\\", os.sep).replace("/", os.sep) + + +@requires_exe("mf6") +def test_load_and_run_sim_when_namefile_uses_filenames( + function_tmpdir, example_data_path +): + # copy model input files to temp workspace + model_name = "mf6-freyberg" + workspace = function_tmpdir / model_name + copytree(example_data_path / model_name, workspace) + + # load, check and run simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + success, _ = sim.run_simulation(report=True) + assert success + + +@requires_exe("mf6") +def test_load_and_run_sim_when_namefile_uses_abs_paths( + function_tmpdir, example_data_path +): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # sub abs paths into namefile + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." + if pattern in l: + l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern) + f.write(l) + + # load, check and run simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + success, _ = sim.run_simulation(report=True) + assert success + + +@requires_exe("mf6") +@pytest.mark.parametrize("sep", ["win", "posix"]) +def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # sub rel paths into namefile + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." + if pattern in l: + if sep == "win": + l = to_win_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + else: + l = to_posix_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." 
+ ) + ) + f.write(l) + + # load and check simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.check() + + # don't run simulation with Windows sep on Linux or Mac + if sep == "win" and platform.system() != "Windows": + return + + # run simulation + success, _ = sim.run_simulation(report=True) + assert success + + +@pytest.mark.skip(reason="currently flopy uses OS-specific path separators") +@pytest.mark.parametrize("sep", ["win", "posix"]) +def test_write_simulation_always_writes_posix_path_separators( + function_tmpdir, example_data_path, sep +): + # copy model input files to temp workspace + model_name = "freyberg" + workspace = function_tmpdir / "ws" + copytree(example_data_path / f"mf6-{model_name}", workspace) + + # use OS-specific path separators + with set_dir(workspace): + nam_path = workspace / "mfsim.nam" + lines = open(nam_path).readlines() + with open(nam_path, "w") as f: + for l in lines: + pattern = f"{model_name}." + if pattern in l: + if sep == "win": + l = to_win_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + else: + l = to_posix_sep( + l.replace( + pattern, "../" + workspace.name + "/" + model_name + "." + ) + ) + f.write(l) + + # load and write simulation + sim = MFSimulation.load(sim_ws=workspace) + sim.write_simulation() + + # make sure posix separators were written + lines = open(workspace / "mfsim.nam").readlines() + assert all("\\" not in l for l in lines) + + +@requires_exe("mf6") +@pytest.mark.parametrize("filename", ["name", "rel", "rel_win"]) +def test_basic_gwf(function_tmpdir, filename): + ws = function_tmpdir + name = "basic_gwf_prep" + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6") + pd = [(1.0, 1, 1.0), (1.0, 1, 1.0)] + + innerdir = Path(function_tmpdir / "inner") + innerdir.mkdir() + + # mfpackage filename can be path or string.. + # if string, it can either be a file name or + # path relative to the simulation workspace. 
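+    # e.g. (illustrative only) these would resolve to the same file under the
+    # simulation workspace, since backslash separators are normalized:
+    #   flopy.mf6.ModflowTdis(sim, ..., filename="inner/sim.tdis")
+    #   flopy.mf6.ModflowTdis(sim, ..., filename="inner\\sim.tdis")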
+    tdis_name = f"{name}.tdis"
+    tdis_path = innerdir / tdis_name
+    tdis_path.touch()
+    tdis_relpath = tdis_path.relative_to(ws).as_posix()
+    tdis_relpath_win = str(tdis_relpath).replace("/", "\\")
+
+    if filename == "name":
+        # file named with no path will be created in simulation workspace
+        tdis = flopy.mf6.ModflowTdis(
+            sim, nper=len(pd), perioddata=pd, filename=tdis_name
+        )
+        assert tdis.filename == tdis_name
+    elif filename == "rel":
+        # filename may be a relative pathlib.Path
+        tdis = flopy.mf6.ModflowTdis(
+            sim, nper=len(pd), perioddata=pd, filename=tdis_relpath
+        )
+        assert tdis.filename == str(tdis_relpath)
+
+        # relative paths may also be provided as strings
+        tdis = flopy.mf6.ModflowTdis(
+            sim, nper=len(pd), perioddata=pd, filename=str(tdis_relpath)
+        )
+        assert tdis.filename == str(tdis_relpath)
+    elif filename == "rel_win":
+        # windows path backslash separator should be converted to forward slash
+        tdis = flopy.mf6.ModflowTdis(
+            sim, nper=len(pd), perioddata=pd, filename=tdis_relpath_win
+        )
+        assert tdis.filename == str(tdis_relpath)
+
+    # create other packages
+    ims = flopy.mf6.ModflowIms(sim)
+    gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
+    dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10)
+    ic = flopy.mf6.ModflowGwfic(gwf)
+    npf = flopy.mf6.ModflowGwfnpf(
+        gwf, save_specific_discharge=True, save_saturation=True
+    )
+    spd = {
+        0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]],
+        1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]],
+    }
+    chd = flopy.mf6.ModflowGwfchd(
+        gwf, pname="CHD-1", stress_period_data=spd, auxiliary=["concentration"]
+    )
+    budget_file = f"{name}.bud"
+    head_file = f"{name}.hds"
+    oc = flopy.mf6.ModflowGwfoc(
+        gwf,
+        budget_filerecord=budget_file,
+        head_filerecord=head_file,
+        saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+    )
+
+    # write the simulation
+    sim.write_simulation()
+
+    # check for input files
+    assert (ws / innerdir / tdis_name).is_file()
+    assert (ws / f"{name}.ims").is_file()
+    assert (ws / f"{name}.dis").is_file()
+    assert (ws / f"{name}.ic").is_file()
+    assert (ws / f"{name}.npf").is_file()
+    assert (ws / f"{name}.chd").is_file()
+    assert (ws / f"{name}.oc").is_file()
+
+    # run the simulation
+    sim.run_simulation()
+
+    # check for output files
+    assert (ws / budget_file).is_file()
+    assert (ws / head_file).is_file()
+
+
+def test_subdir(function_tmpdir):
+    sim = MFSimulation(sim_ws=function_tmpdir)
+    assert sim.sim_path == function_tmpdir
+
+    tdis = ModflowTdis(sim)
+    gwf = ModflowGwf(sim, model_rel_path="level2")
+    ims = ModflowIms(sim)
+    sim.register_ims_package(ims, [])
+    dis = ModflowGwfdis(gwf)
+    sim.set_all_data_external(external_data_folder="dat")
+    sim.write_simulation()
+
+    sim_r = MFSimulation.load(
+        "mfsim.nam",
+        sim_ws=sim.simulation_data.mfpath.get_sim_path(),
+    )
+    gwf_r = sim_r.get_model()
+    assert gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry(), (
+        "Something wrong with model external paths"
+    )
+
+    sim_r.set_all_data_internal()
+    sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
+    sim_r.write_simulation()
+
+    sim_r2 = MFSimulation.load(
+        "mfsim.nam",
+        sim_ws=sim_r.simulation_data.mfpath.get_sim_path(),
+    )
+    gwf_r2 = sim_r2.get_model()
+    assert gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry(), (
+        "Something wrong with model external paths"
+    )
+
+
+@requires_exe("mf6")
+@pytest.mark.parametrize("layered", [True, False])
+def test_binary_write(function_tmpdir, layered):
+    nlay, nrow, ncol = 2, 1, 10
+    shape2d = (nrow, 
ncol) + + # data for layers + botm = [4.0, 0.0] + strt = [5.0, 10.0] + + # create binary data structured + if layered: + idomain_data = [] + botm_data = [] + strt_data = [] + for k in range(nlay): + idomain_data.append( + { + "factor": 1.0, + "filename": f"idomain_l{k + 1}.bin", + "data": 1, + "binary": True, + "iprn": 1, + } + ) + botm_data.append( + { + "filename": f"botm_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(shape2d, botm[k], dtype=float), + } + ) + strt_data.append( + { + "filename": f"strt_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(shape2d, strt[k], dtype=float), + } + ) + else: + idomain_data = { + "filename": "idomain.bin", + "binary": True, + "iprn": 1, + "data": 1, + } + botm_data = { + "filename": "botm.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(shape2d, botm[0], dtype=float), + np.full(shape2d, botm[1], dtype=float), + ] + ), + } + strt_data = { + "filename": "strt.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(shape2d, strt[0], dtype=float), + np.full(shape2d, strt[1], dtype=float), + ] + ), + } + + # binary data that does not vary by layers + top_data = { + "filename": "top.bin", + "binary": True, + "iprn": 1, + "data": 10.0, + } + rch_data = { + 0: { + "filename": "recharge.bin", + "binary": True, + "iprn": 1, + "data": 0.000001, + }, + } + chd_data = [ + (1, 0, 0, 10.0, 1.0, 100.0), + (1, 0, ncol - 1, 5.0, 0.0, 100.0), + ] + chd_data = { + 0: { + "filename": "chd.bin", + "binary": True, + "iprn": 1, + "data": chd_data, + }, + } + + sim = MFSimulation(sim_ws=str(function_tmpdir)) + ModflowTdis(sim) + ModflowIms(sim, complexity="simple") + gwf = ModflowGwf(sim, print_input=True) + ModflowGwfdis( + gwf, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=1.0, + delc=1.0, + top=top_data, + botm=botm_data, + idomain=idomain_data, + ) + ModflowGwfnpf( + gwf, + icelltype=1, + ) + ModflowGwfic( + gwf, + strt=strt_data, + ) + ModflowGwfchd( + gwf, + auxiliary=["conc", "something"], + stress_period_data=chd_data, + ) + ModflowGwfrcha(gwf, recharge=rch_data) + + sim.write_simulation() + success, buff = sim.run_simulation() + assert success + + +@requires_exe("mf6") +@requires_pkg("shapely", "scipy") +@pytest.mark.parametrize("layered", [True, False]) +def test_vor_binary_write(function_tmpdir, layered): + # build voronoi grid + boundary = [(0.0, 0.0), (0.0, 1.0), (10.0, 1.0), (10.0, 0.0)] + triangle_ws = function_tmpdir / "triangle" + triangle_ws.mkdir(parents=True, exist_ok=True) + + tri = Triangle( + angle=30, + maximum_area=1.0, + model_ws=triangle_ws, + ) + tri.add_polygon(boundary) + tri.build(verbose=False) + vor = VoronoiGrid(tri) + + # problem dimensions + nlay = 2 + + # data for layers + botm = [4.0, 0.0] + strt = [5.0, 10.0] + + # build binary data + if layered: + idomain_data = [] + botm_data = [] + strt_data = [] + for k in range(nlay): + idomain_data.append( + { + "factor": 1.0, + "filename": f"idomain_l{k + 1}.bin", + "data": 1, + "binary": True, + "iprn": 1, + } + ) + botm_data.append( + { + "filename": f"botm_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, botm[k], dtype=float), + } + ) + strt_data.append( + { + "filename": f"strt_l{k + 1}.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, strt[k], dtype=float), + } + ) + else: + idomain_data = { + "filename": "idomain.bin", + "binary": True, + "iprn": 1, + "data": 1, + } + botm_data = { + "filename": "botm.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(vor.ncpl, 
botm[0], dtype=float), + np.full(vor.ncpl, botm[1], dtype=float), + ] + ), + } + strt_data = { + "filename": "strt.bin", + "binary": True, + "iprn": 1, + "data": np.array( + [ + np.full(vor.ncpl, strt[0], dtype=float), + np.full(vor.ncpl, strt[1], dtype=float), + ] + ), + } + + # binary data that does not vary by layers + top_data = { + "filename": "top.bin", + "binary": True, + "iprn": 1, + "data": 10.0, + } + rch_data = { + 0: { + "filename": "recharge.bin", + "binary": True, + "iprn": 1, + "data": np.full(vor.ncpl, 0.000001, dtype=float), + }, + } + chd_data = [ + (1, 0, 10.0, 1.0, 100.0), + (1, 1, 10.0, 1.0, 100.0), + (1, 2, 5.0, 0.0, 100.0), + (1, 3, 5.0, 0.0, 100.0), + ] + chd_data = { + 0: { + "filename": "chd.bin", + "binary": True, + "data": chd_data, + }, + } + + # build model + sim = MFSimulation(sim_ws=str(function_tmpdir)) + ModflowTdis(sim) + ModflowIms(sim, complexity="simple") + gwf = ModflowGwf(sim, print_input=True) + flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=vor.ncpl, + nvert=vor.nverts, + vertices=vor.get_disv_gridprops()["vertices"], + cell2d=vor.get_disv_gridprops()["cell2d"], + top=top_data, + botm=botm_data, + idomain=idomain_data, + xorigin=0.0, + yorigin=0.0, + ) + ModflowGwfnpf( + gwf, + icelltype=1, + ) + ModflowGwfic( + gwf, + strt=strt_data, + ) + ModflowGwfrcha(gwf, recharge=rch_data) + ModflowGwfchd( + gwf, + auxiliary=["conc", "something"], + stress_period_data=chd_data, + ) + sim.write_simulation() + success, buff = sim.run_simulation() + assert success + + +def test_binary_read(function_tmpdir): + test_ex_name = "binary_read" + nlay = 3 + nrow = 10 + ncol = 10 + + modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol) + + arr = np.arange(nlay * nrow * ncol).astype(np.float64) + data_shape = (nlay, nrow, ncol) + data_size = nlay * nrow * ncol + arr.shape = data_shape + + sim_data = MFSimulationData("integration", None) + dstruct = MFDataItemStructure() + dstruct.is_cellid = False + dstruct.name = "fake" + dstruct.data_items = [ + None, + ] + mfstruct = MFDataStructure(dstruct, False, "ic", None) + mfstruct.data_item_structures = [ + dstruct, + ] + mfstruct.path = [ + "fake", + ] + + md = ModelDimensions("test", None) + pd = PackageDimensions([md], None, "integration") + dd = DataDimensions(pd, mfstruct) + + binfile = function_tmpdir / "structured_layered.hds" + with open(binfile, "wb") as foo: + for ix, a in enumerate(arr): + write_head(foo, a, ilay=ix) + + fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) + + # test path as both Path and str + for bf in [binfile, str(binfile)]: + arr2 = fa.read_binary_data_from_file( + bf, data_shape, data_size, np.float64, modelgrid + )[0] + + assert np.allclose(arr, arr2), ( + "Binary read for layered structured failed with " + + ("Path" if isinstance(binfile, Path) else "str") + ) + + binfile = function_tmpdir / "structured_flat.hds" + with open(binfile, "wb") as foo: + a = np.expand_dims(np.ravel(arr), axis=0) + write_head(foo, a, ilay=1) + + arr2 = fa.read_binary_data_from_file( + binfile, data_shape, data_size, np.float64, modelgrid + )[0] + + assert np.allclose(arr, arr2), "Binary read for flat Structured failed" + + ncpl = nrow * ncol + data_shape = (nlay, ncpl) + arr.shape = data_shape + modelgrid = flopy.discretization.VertexGrid(nlay=nlay, ncpl=ncpl) + + fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) + + binfile = function_tmpdir / "vertex_layered.hds" + with open(binfile, "wb") as foo: + tarr = arr.reshape((nlay, 1, ncpl)) + for ix, a in 
enumerate(tarr): + write_head(foo, a, ilay=ix) + + arr2 = fa.read_binary_data_from_file( + binfile, data_shape, data_size, np.float64, modelgrid + )[0] + + assert np.allclose(arr, arr2), "Binary read for layered Vertex failed" + + binfile = function_tmpdir / "vertex_flat.hds" + with open(binfile, "wb") as foo: + a = np.expand_dims(np.ravel(arr), axis=0) + write_head(foo, a, ilay=1) + + arr2 = fa.read_binary_data_from_file( + binfile, data_shape, data_size, np.float64, modelgrid + )[0] + + assert np.allclose(arr, arr2), "Binary read for flat Vertex failed" + + nlay = 3 + ncpl = [50, 100, 150] + data_shape = (np.sum(ncpl),) + arr.shape = data_shape + modelgrid = flopy.discretization.UnstructuredGrid(ncpl=ncpl) + + fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) + + binfile = function_tmpdir / "unstructured.hds" + with open(binfile, "wb") as foo: + a = np.expand_dims(arr, axis=0) + write_head(foo, a, ilay=1) + + arr2 = fa.read_binary_data_from_file( + binfile, data_shape, data_size, np.float64, modelgrid + )[0] + + assert np.allclose(arr, arr2), "Binary read for Unstructured failed" + + +@requires_exe("mf6") +def test_props_and_write(function_tmpdir): + # workspace as str + sim = MFSimulation(sim_ws=str(function_tmpdir)) + assert isinstance(sim, MFSimulation) + assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path + + # workspace as Path + sim = MFSimulation(sim_ws=function_tmpdir) + assert isinstance(sim, MFSimulation) + assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path + + tdis = ModflowTdis(sim) + assert isinstance(tdis, ModflowTdis) + + gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2") + assert isinstance(gwfgwf, ModflowGwfgwf) + + gwf = ModflowGwf(sim) + assert isinstance(gwf, ModflowGwf) + + ims = ModflowIms(sim) + assert isinstance(ims, ModflowIms) + sim.register_ims_package(ims, []) + + dis = ModflowGwfdis(gwf) + assert isinstance(dis, ModflowGwfdis) + + disu = ModflowGwfdisu(gwf) + assert isinstance(disu, ModflowGwfdisu) + + disv = ModflowGwfdisv(gwf) + assert isinstance(disv, ModflowGwfdisv) + + npf = ModflowGwfnpf(gwf) + assert isinstance(npf, ModflowGwfnpf) + + ic = ModflowGwfic(gwf) + assert isinstance(ic, ModflowGwfic) + + sto = ModflowGwfsto(gwf) + assert isinstance(sto, ModflowGwfsto) + + hfb = ModflowGwfhfb(gwf) + assert isinstance(hfb, ModflowGwfhfb) + + gnc = ModflowGwfgnc(gwf) + assert isinstance(gnc, ModflowGwfgnc) + + chd = ModflowGwfchd(gwf) + assert isinstance(chd, ModflowGwfchd) + + wel = ModflowGwfwel(gwf) + assert isinstance(wel, ModflowGwfwel) + + drn = ModflowGwfdrn(gwf) + assert isinstance(drn, ModflowGwfdrn) + + riv = ModflowGwfriv(gwf) + assert isinstance(riv, ModflowGwfriv) + + ghb = ModflowGwfghb(gwf) + assert isinstance(ghb, ModflowGwfghb) + + rch = ModflowGwfrch(gwf) + assert isinstance(rch, ModflowGwfrch) + + rcha = ModflowGwfrcha(gwf) + assert isinstance(rcha, ModflowGwfrcha) + + evt = ModflowGwfevt(gwf) + assert isinstance(evt, ModflowGwfevt) + + evta = ModflowGwfevta(gwf) + assert isinstance(evta, ModflowGwfevta) + + maw = ModflowGwfmaw(gwf) + assert isinstance(maw, ModflowGwfmaw) + + sfr = ModflowGwfsfr(gwf) + assert isinstance(sfr, ModflowGwfsfr) + + lak = ModflowGwflak(gwf) + assert isinstance(lak, ModflowGwflak) + + uzf = ModflowGwfuzf(gwf) + assert isinstance(uzf, ModflowGwfuzf) + + mvr = ModflowGwfmvr(gwf) + assert isinstance(mvr, ModflowGwfmvr) + + # Write files + sim.write_simulation() + + # Verify files were written + assert 
os.path.isfile(os.path.join(str(function_tmpdir), "mfsim.nam")) + exts_model = [ + "nam", + "dis", + "disu", + "disv", + "npf", + "ic", + "sto", + "hfb", + "gnc", + "chd", + "wel", + "drn", + "riv", + "ghb", + "rch", + "rcha", + "evt", + "evta", + "maw", + "sfr", + "lak", + "mvr", + ] + exts_sim = ["gwfgwf", "ims", "tdis"] + for ext in exts_model: + fname = os.path.join(str(function_tmpdir), f"model.{ext}") + assert os.path.isfile(fname), f"{fname} not found" + for ext in exts_sim: + fname = os.path.join(str(function_tmpdir), f"sim.{ext}") + assert os.path.isfile(fname), f"{fname} not found" + + +@pytest.mark.parametrize("use_paths", [True, False]) +def test_set_sim_path(function_tmpdir, use_paths): + sim_name = "testsim" + model_name = "testmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=exe_name, + sim_ws=function_tmpdir, + ) + + new_ws = function_tmpdir / "new_ws" + new_ws.mkdir() + sim.set_sim_path(new_ws if use_paths else str(new_ws)) + + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + sim.write_simulation() + + assert len([p for p in function_tmpdir.glob("*") if p.is_file()]) == 0 + assert len([p for p in new_ws.glob("*") if p.is_file()]) > 0 + + +@requires_exe("mf6") +@pytest.mark.parametrize("use_paths", [True, False]) +def test_create_and_run_model(function_tmpdir, use_paths): + # names + sim_name = "testsim" + model_name = "testmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + if use_paths: + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=Path(which(exe_name)), + sim_ws=function_tmpdir, + ) + else: + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=str(exe_name), + sim_ws=str(function_tmpdir), + ) + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + # create solution and add the model + ims_package = mfims.ModflowIms( + sim, + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.00001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.00001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + sim.register_ims_package(ims_package, [model_name]) + + # add packages to model + dis_package = mfgwfdis.ModflowGwfdis( + model, + length_units="FEET", + nlay=1, + nrow=1, + ncol=10, + delr=500.0, + delc=500.0, + top=100.0, + botm=50.0, + filename=f"{model_name}.dis", + ) + ic_package = mfgwfic.ModflowGwfic( + model, + strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0], + filename=f"{model_name}.ic", + ) + npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) + + sto_package = mfgwfsto.ModflowGwfsto( + model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 + ) + + wel_package = mfgwfwel.ModflowGwfwel( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=2, + stress_period_data=[((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)], + ) + wel_package.stress_period_data.add_transient_key(1) + 
wel_package.stress_period_data.set_data([((0, 0, 4), -200.0)], 1) + + drn_package = mfgwfdrn.ModflowGwfdrn( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=1, + stress_period_data=[((0, 0, 0), 80, 60.0)], + ) + + riv_package = mfgwfriv.ModflowGwfriv( + model, + print_input=True, + print_flows=True, + save_flows=True, + maxbound=1, + stress_period_data=[((0, 0, 9), 110, 90.0, 100.0)], + ) + oc_package = mfgwfoc.ModflowGwfoc( + model, + budget_filerecord=[f"{model_name}.cbc"], + head_filerecord=[f"{model_name}.hds"], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + oc_package.saverecord.add_transient_key(1) + oc_package.saverecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) + oc_package.printrecord.add_transient_key(1) + oc_package.printrecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) + + # write the simulation input files + sim.write_simulation() + + # run the simulation and look for output + success, buff = sim.run_simulation() + assert success + + +@requires_exe("mf6") +def test_get_set_data_record(function_tmpdir): + # names + sim_name = "testrecordsim" + model_name = "testrecordmodel" + exe_name = "mf6" + + # set up simulation + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation( + sim_name=sim_name, + version="mf6", + exe_name=exe_name, + sim_ws=str(function_tmpdir), + ) + tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)] + tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) + + # create model instance + model = mfgwf.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + # create solution and add the model + ims_package = mfims.ModflowIms( + sim, + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.00001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.00001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + sim.register_ims_package(ims_package, [model_name]) + + # add packages to model + dis_package = mfgwfdis.ModflowGwfdis( + model, + length_units="FEET", + nlay=3, + nrow=10, + ncol=10, + delr=500.0, + delc=500.0, + top=100.0, + botm=[50.0, 10.0, -50.0], + filename=f"{model_name}.dis", + ) + ic_package = mfgwfic.ModflowGwfic( + model, + strt=[100.0, 90.0, 80.0], + filename=f"{model_name}.ic", + ) + npf_package = mfgwfnpf.ModflowGwfnpf( + model, save_flows=True, icelltype=1, k=50.0, k33=1.0 + ) + + sto_package = mfgwfsto.ModflowGwfsto( + model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 + ) + # wel packages + period_one = ModflowGwfwel.stress_period_data.empty( + model, + maxbound=3, + aux_vars=["var1", "var2", "var3"], + boundnames=True, + timeseries=True, + ) + period_one[0][0] = ((0, 9, 2), -50.0, -1, -2, -3, None) + period_one[0][1] = ((1, 4, 7), -100.0, 1, 2, 3, "well_1") + period_one[0][2] = ((1, 3, 2), -20.0, 4, 5, 6, "well_2") + period_two = ModflowGwfwel.stress_period_data.empty( + model, + maxbound=2, + aux_vars=["var1", "var2", "var3"], + boundnames=True, + timeseries=True, + ) + period_two[0][0] = ((2, 3, 2), -80.0, 1, 2, 3, "well_2") + period_two[0][1] = ((2, 4, 7), -10.0, 4, 5, 6, "well_1") + stress_period_data = {} + stress_period_data[0] = period_one[0] + stress_period_data[1] = period_two[0] + wel_package = ModflowGwfwel( + model, + print_input=True, + print_flows=True, + auxiliary=[("var1", "var2", "var3")], + maxbound=5, + stress_period_data=stress_period_data, + boundnames=True, + 
save_flows=True, + ) + # rch package + rch_period_list = [] + for row in range(0, 10): + for col in range(0, 10): + rch_amt = (1 + row / 10) * (1 + col / 10) + rch_period_list.append(((0, row, col), rch_amt, 0.5)) + rch_period = {} + rch_period[0] = rch_period_list + rch_package = ModflowGwfrch( + model, + fixed_cell=True, + auxiliary="MULTIPLIER", + auxmultname="MULTIPLIER", + print_input=True, + print_flows=True, + save_flows=True, + maxbound=54, + stress_period_data=rch_period, + ) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # test get_record, set_record for list data + wel = model.get_package("wel") + spd_record = wel.stress_period_data.get_record() + well_sp_1 = spd_record[0] + assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt" + assert well_sp_1["binary"] is False + assert well_sp_1["data"][0][0] == (0, 9, 2) + assert well_sp_1["data"][0][1] == -50.0 + # modify + del well_sp_1["filename"] + well_sp_1["data"][0][0] = (1, 9, 2) + well_sp_2 = spd_record[1] + del well_sp_2["filename"] + well_sp_2["data"][0][0] = (1, 1, 1) + # save + spd_record[0] = well_sp_1 + spd_record[1] = well_sp_2 + wel.stress_period_data.set_record(spd_record) + # verify changes + spd_record = wel.stress_period_data.get_record() + well_sp_1 = spd_record[0] + assert "filename" not in well_sp_1 + assert well_sp_1["data"][0][0] == (1, 9, 2) + assert well_sp_1["data"][0][1] == -50.0 + well_sp_2 = spd_record[1] + assert "filename" not in well_sp_2 + assert well_sp_2["data"][0][0] == (1, 1, 1) + spd = wel.stress_period_data.get_data() + assert spd[0][0][0] == (1, 9, 2) + # change well_sp_2 back to external + well_sp_2["filename"] = "wel_spd_data_2.txt" + spd_record[1] = well_sp_2 + wel.stress_period_data.set_record(spd_record) + # change well_sp_2 data + spd[1][0][0] = (1, 2, 2) + wel.stress_period_data.set_data(spd) + # verify changes + spd_record = wel.stress_period_data.get_record() + well_sp_2 = spd_record[1] + assert well_sp_2["filename"] == "wel_spd_data_2.txt" + assert well_sp_2["data"][0][0] == (1, 2, 2) + + # test get_data/set_data vs get_record/set_record + dis = model.get_package("dis") + botm = dis.botm.get_record() + assert len(botm) == 3 + layer_2 = botm[1] + layer_3 = botm[2] + # verify layer 2 + assert layer_2["filename"] == "testrecordmodel.dis_botm_layer2.txt" + assert layer_2["binary"] is False + assert layer_2["factor"] == 1.0 + assert layer_2["iprn"] is None + assert layer_2["data"][0][0] == 10.0 + # change and set layer 2 + layer_2["filename"] = "botm_layer2.txt" + layer_2["binary"] = True + layer_2["iprn"] = 3 + layer_2["factor"] = 2.0 + layer_2["data"] = layer_2["data"] * 0.5 + botm[1] = layer_2 + # change and set layer 3 + del layer_3["filename"] + layer_3["factor"] = 0.5 + layer_3["data"] = layer_3["data"] * 2.0 + botm[2] = layer_3 + dis.botm.set_record(botm) + + # get botm in two different ways, verifying changes made + botm_record = dis.botm.get_record() + layer_1 = botm_record[0] + assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" + assert layer_1["binary"] is False + assert layer_1["iprn"] is None + assert layer_1["data"][0][0] == 50.0 + layer_2 = botm_record[1] + assert layer_2["filename"] == "botm_layer2.txt" + assert layer_2["binary"] is True + assert layer_2["factor"] == 2.0 + assert layer_2["iprn"] == 3 + assert layer_2["data"][0][0] == 5.0 + layer_3 = botm_record[2] + assert "filename" not in layer_3 + assert layer_3["factor"] == 0.5 + assert layer_3["data"][0][0] == -100.0 + botm_data = 
dis.botm.get_data(apply_mult=True) + assert botm_data[0][0][0] == 50.0 + assert botm_data[1][0][0] == 10.0 + assert botm_data[2][0][0] == -50.0 + botm_data = dis.botm.get_data() + assert botm_data[0][0][0] == 50.0 + assert botm_data[1][0][0] == 5.0 + assert botm_data[2][0][0] == -100.0 + # modify and set botm data with set_data + botm_data[0][0][0] = 6.0 + botm_data[1][0][0] = -8.0 + botm_data[2][0][0] = -205.0 + dis.botm.set_data(botm_data) + # verify that data changed and metadata did not change + botm_record = dis.botm.get_record() + layer_1 = botm_record[0] + assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" + assert layer_1["binary"] is False + assert layer_1["iprn"] is None + assert layer_1["data"][0][0] == 6.0 + assert layer_1["data"][0][1] == 50.0 + layer_2 = botm_record[1] + assert layer_2["filename"] == "botm_layer2.txt" + assert layer_2["binary"] is True + assert layer_2["factor"] == 2.0 + assert layer_2["iprn"] == 3 + assert layer_2["data"][0][0] == -8.0 + assert layer_2["data"][0][1] == 5.0 + layer_3 = botm_record[2] + assert "filename" not in layer_3 + assert layer_3["factor"] == 0.5 + assert layer_3["data"][0][0] == -205.0 + botm_data = dis.botm.get_data() + assert botm_data[0][0][0] == 6.0 + assert botm_data[1][0][0] == -8.0 + assert botm_data[2][0][0] == -205.0 + + spd_record = rch_package.stress_period_data.get_record() + assert 0 in spd_record + assert isinstance(spd_record[0], dict) + assert "filename" in spd_record[0] + assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" + assert "binary" in spd_record[0] + assert spd_record[0]["binary"] is False + assert "data" in spd_record[0] + assert spd_record[0]["data"][0][0] == (0, 0, 0) + spd_record[0]["data"][0][0] = (0, 0, 8) + rch_package.stress_period_data.set_record(spd_record) + + spd_data = rch_package.stress_period_data.get_data() + assert spd_data[0][0][0] == (0, 0, 8) + spd_data[0][0][0] = (0, 0, 7) + rch_package.stress_period_data.set_data(spd_data) + + spd_record = rch_package.stress_period_data.get_record() + assert isinstance(spd_record[0], dict) + assert "filename" in spd_record[0] + assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" + assert "binary" in spd_record[0] + assert spd_record[0]["binary"] is False + assert "data" in spd_record[0] + assert spd_record[0]["data"][0][0] == (0, 0, 7) + + sim.write_simulation() + + +@requires_exe("mf6") +def test_output(function_tmpdir, example_data_path): + ex_name = "test001e_UZF_3lay" + sim_ws = example_data_path / "mf6" / ex_name + sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") + sim.set_sim_path(str(function_tmpdir)) + sim.write_simulation() + success, buff = sim.run_simulation() + assert success, f"simulation {sim.name} did not run" + + ml = sim.get_model("gwf_1") + + bud = ml.oc.output.budget() + budcsv = ml.oc.output.budgetcsv() + assert budcsv.file.closed + hds = ml.oc.output.head() + lst = ml.oc.output.list() + + idomain = np.ones(ml.modelgrid.shape, dtype=int) + zonbud = ml.oc.output.zonebudget(idomain) + + assert isinstance(bud, CellBudgetFile) + assert isinstance(budcsv, CsvFile) + assert isinstance(hds, HeadFile) + assert isinstance(zonbud, ZoneBudget6) + assert isinstance(lst, Mf6ListBudget) + + bud = ml.output.budget() + budcsv = ml.output.budgetcsv() + hds = ml.output.head() + zonbud = ml.output.zonebudget(idomain) + lst = ml.output.list() + + assert isinstance(bud, CellBudgetFile) + assert isinstance(budcsv, CsvFile) + assert isinstance(hds, HeadFile) + assert 
isinstance(zonbud, ZoneBudget6) + assert isinstance(lst, Mf6ListBudget) + + uzf = ml.uzf + uzf_bud = uzf.output.budget() + uzf_budcsv = uzf.output.budgetcsv() + conv = uzf.output.package_convergence() + uzf_obs = uzf.output.obs() + uzf_zonbud = uzf.output.zonebudget(idomain) + + assert isinstance(uzf_bud, CellBudgetFile) + assert isinstance(uzf_budcsv, CsvFile) + if conv is not None: + assert isinstance(conv, CsvFile) + assert isinstance(uzf_obs, Mf6Obs) + assert isinstance(uzf_zonbud, ZoneBudget6) + assert ml.dis.output.methods() is None + + +@requires_exe("mf6") +@pytest.mark.slow +def test_output_add_observation(function_tmpdir, example_data_path): + model_name = "lakeex2a" + sim_ws = str(example_data_path / "mf6" / "test045_lake2tr") + sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") + gwf = sim.get_model(model_name) + + # remove sfr_obs and add a new sfr obs + sfr = gwf.sfr + obs_file = f"{model_name}.sfr.obs" + csv_file = f"{obs_file}.csv" + obs_dict = { + csv_file: [ + ("l08_stage", "stage", (8,)), + ("l09_stage", "stage", (9,)), + ("l14_stage", "stage", (14,)), + ("l15_stage", "stage", (15,)), + ] + } + gwf.sfr.obs.initialize( + filename=obs_file, digits=10, print_input=True, continuous=obs_dict + ) + + sim.set_sim_path(str(function_tmpdir)) + sim.write_simulation() + + success, buff = sim.run_simulation() + assert success, f"simulation {sim.name} did not run" + + # check that .output finds the newly added OBS package + sfr_obs = gwf.sfr.output.obs() + + assert isinstance(sfr_obs, Mf6Obs), ( + "remove and add observation test (Mf6Output) failed" + ) + + +@requires_exe("mf6") +def test_sfr_connections(function_tmpdir, example_data_path): + """MODFLOW just warns if any reaches are unconnected + flopy fails to load model if reach 1 is unconnected, fine with other unconnected + """ + + data_path = example_data_path / "mf6" / "test666_sfrconnections" + sim_ws = function_tmpdir + for test in ["sfr0", "sfr1"]: + sim_name = "test_sfr" + model_name = "test_sfr" + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation( + sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=sim_ws + ) + tdis_rc = [(1.0, 1, 1.0)] + tdis = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc) + ims_package = ModflowIms( + sim, + pname="my_ims_file", + filename=f"{sim_name}.ims", + print_option="ALL", + complexity="SIMPLE", + ) + model = ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + + dis = ModflowGwfdis( + model, + length_units="FEET", + nlay=1, + nrow=5, + ncol=5, + delr=5000.0, + delc=5000.0, + top=100.0, + botm=-100.0, + filename=f"{model_name}.dis", + ) + ic_package = ModflowGwfic(model, filename=f"{model_name}.ic") + npf_package = ModflowGwfnpf( + model, + pname="npf", + save_flows=True, + alternative_cell_averaging="logarithmic", + icelltype=1, + k=50.0, + ) + + cnfile = f"mf6_{test}_connection.txt" + pkfile = f"mf6_{test}_package.txt" + + with open(data_path / pkfile, "r") as f: + nreaches = len(f.readlines()) + sfr = ModflowGwfsfr( + model, + packagedata={"filename": str(data_path / pkfile)}, + connectiondata={"filename": str(data_path / cnfile)}, + nreaches=nreaches, + pname="sfr", + unit_conversion=86400, + ) + sim.set_all_data_external() + sim.write_simulation() + success, buff = sim.run_simulation() + assert success, f"simulation {sim.name} did not run" + + # reload simulation + sim2 = MFSimulation.load(sim_ws=sim_ws) + sim2.set_all_data_external() + sim2.write_simulation() + success, buff = sim2.run_simulation() + assert success, f"simulation 
{sim2.name} did not run after being reloaded" + + # test sfr recarray data + model2 = sim2.get_model() + sfr2 = model2.get_package("sfr") + sfr_pd = sfr2.packagedata + rec_data = [ + (0, 0, 0, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 1, 1.0, 0), + (1, 0, 1, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 2, 1.0, 0), + ] + rec_type = [ + ("ifno", int), + ("layer", int), + ("row", int), + ("column", int), + ("rlen", float), + ("rwid", float), + ("rgrd", float), + ("rtp", float), + ("rbth", float), + ("rhk", float), + ("man", float), + ("nconn", int), + ("ustrf", float), + ("nvd", int), + ] + pkg_data = np.rec.array(rec_data, rec_type) + sfr_pd.set_record({"data": pkg_data}) + data = sfr_pd.get_data() + assert data[0][1] == (0, 0, 0) + + +@requires_exe("mf6") +def test_array(function_tmpdir): + # get_data + # empty data in period block vs data repeating + # array + # aux values, test that they work the same as other arrays (is a value + # of zero always used even if aux is defined in a previous stress + # period?) + + sim_name = "test_array" + model_name = "test_array" + out_dir = function_tmpdir + tdis_name = f"{sim_name}.tdis" + sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir) + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] + tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) + ims_package = ModflowIms( + sim, + pname="my_ims_file", + filename=f"{sim_name}.ims", + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.0001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.0001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam") + + dis = ModflowGwfdis( + model, + length_units="FEET", + nlay=4, + nrow=2, + ncol=2, + delr=5000.0, + delc=5000.0, + top=100.0, + botm=[50.0, 0.0, -50.0, -100.0], + filename=f"{model_name} 1.dis", + ) + ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic") + npf_package = ModflowGwfnpf( + model, + pname="npf_1", + save_flows=True, + alternative_cell_averaging="logarithmic", + icelltype=1, + k=50.0, + ) + + oc_package = ModflowGwfoc( + model, + budget_filerecord=[("test_array.cbc",)], + head_filerecord=[("test_array.hds",)], + saverecord={ + 0: [("HEAD", "ALL"), ("BUDGET", "ALL")], + 1: [], + }, + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]} + irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]} + rcha = ModflowGwfrcha( + model, + print_input=True, + print_flows=True, + auxiliary=[("var1", "var2")], + irch=irch, + recharge={1: 0.0001, 2: 0.00001}, + aux=aux, + ) + val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) + assert val_irch[0] == 4 + assert val_irch[1] == 5 + assert val_irch[2] == 6 + assert val_irch[3] == 6 + val_irch_2 = rcha.irch.get_data() + assert val_irch_2[0] is None + assert val_irch_2[1][1, 1] == 1 + assert val_irch_2[2][1, 1] == 3 + assert val_irch_2[3] is None + val_irch_2_3 = rcha.irch.get_data(3) + assert val_irch_2_3 is None + val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) + assert val_rch[0] == 0.0 + assert val_rch[1] == 0.0004 + assert val_rch[2] == 0.00004 + assert val_rch[3] == 0.00004 + val_rch_2 = rcha.recharge.get_data() + assert val_rch_2[0] is None + assert val_rch_2[1][0, 0] == 0.0001 + assert val_rch_2[2][0, 0] == 0.00001 + assert val_rch_2[3] is None + aux_data_0 = rcha.aux.get_data(0) + 
assert aux_data_0 is None + aux_data_1 = rcha.aux.get_data(1) + assert aux_data_1[0][0][0] == 50.0 + aux_data_2 = rcha.aux.get_data(2) + assert aux_data_2 is None + aux_data_3 = rcha.aux.get_data(3) + assert aux_data_3[0][0][0] == 200.0 + + welspdict = {1: [[(0, 0, 0), 0.25, 0.0]], 2: [[(0, 0, 0), 0.1, 0.0]]} + wel = ModflowGwfwel( + model, + print_input=True, + print_flows=True, + mover=True, + stress_period_data=welspdict, + save_flows=False, + auxiliary="CONCENTRATION", + pname="WEL-1", + ) + wel_array = wel.stress_period_data.array + #print(type(wel.stress_period_data)) + #print(type(wel.stress_period_data.array)) + assert wel_array[0] is None + assert wel_array[1][0][1] == 0.25 + assert wel_array[2][0][1] == 0.1 + assert wel_array[3][0][1] == 0.1 + + drnspdict = { + 0: [[(0, 0, 0), 60.0, 10.0]], + 2: [], + 3: [[(0, 0, 0), 55.0, 5.0]], + } + drn = ModflowGwfdrn( + model, + print_input=True, + print_flows=True, + stress_period_data=drnspdict, + save_flows=False, + pname="DRN-1", + ) + drn_array = drn.stress_period_data.array + assert drn_array[0][0][1] == 60.0 + assert drn_array[1][0][1] == 60.0 + assert drn_array[2] is None + assert drn_array[3][0][1] == 55.0 + drn_gd_0 = drn.stress_period_data.get_data(0) + assert drn_gd_0[0][1] == 60.0 + drn_gd_1 = drn.stress_period_data.get_data(1) + assert drn_gd_1 is None + drn_gd_2 = drn.stress_period_data.get_data(2) + assert len(drn_gd_2) == 0 + drn_gd_3 = drn.stress_period_data.get_data(3) + assert drn_gd_3[0][1] == 55.0 + + ghbspdict = { + 0: [[(0, 1, 1), 60.0, 10.0]], + } + ghb = ModflowGwfghb( + model, + print_input=True, + print_flows=True, + stress_period_data=ghbspdict, + save_flows=False, + pname="GHB-1", + ) + + lakpd = [(0, 70.0, 1), (1, 65.0, 1)] + lakecn = [ + (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), + ] + lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")] + lak = ModflowGwflak( + model, + pname="lak", + print_input=True, + mover=True, + nlakes=2, + noutlets=0, + ntables=1, + packagedata=lakpd, + connectiondata=lakecn, + tables=lak_tables, + ) + + table_01 = [ + (30.0, 100000.0, 10000.0), + (40.0, 200500.0, 10100.0), + (50.0, 301200.0, 10130.0), + (60.0, 402000.0, 10180.0), + (70.0, 503000.0, 10200.0), + (80.0, 700000.0, 20000.0), + ] + lak_tab = ModflowUtllaktab( + model, + filename="lak01.tab", + nrow=6, + ncol=3, + table=table_01, + ) + + table_02 = [ + (40.0, 100000.0, 10000.0), + (50.0, 200500.0, 10100.0), + (60.0, 301200.0, 10130.0), + (70.0, 402000.0, 10180.0), + (80.0, 503000.0, 10200.0), + (90.0, 700000.0, 20000.0), + ] + lak_tab_2 = ModflowUtllaktab( + model, + filename="lak02.tab", + nrow=6, + ncol=3, + table=table_02, + ) + wel_name_1 = wel.name[0] + lak_name_2 = lak.name[0] + package_data = [(wel_name_1,), (lak_name_2,)] + period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)] + fname = f"{model.name}.input.mvr" + mvr = ModflowGwfmvr( + parent_model_or_package=model, + filename=fname, + print_input=True, + print_flows=True, + maxpackages=2, + maxmvr=1, + packages=package_data, + perioddata=period_data, + ) + + # test writing and loading model + sim.write_simulation() + sim.run_simulation() + + test_sim = MFSimulation.load( + sim_name, + "mf6", + "mf6", + out_dir, + write_headers=False, + ) + model = test_sim.get_model() + dis = model.get_package("dis") + rcha = model.get_package("rcha") + wel = model.get_package("wel") + drn = model.get_package("drn") + lak = model.get_package("lak") + lak_tab = model.get_package("laktab") + assert 
os.path.split(dis.filename)[1] == f"{model_name} 1.dis" + # do same tests as above + val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) + assert val_irch[0] == 4 + assert val_irch[1] == 5 + assert val_irch[2] == 6 + assert val_irch[3] == 6 + val_irch_2 = rcha.irch.get_data() + assert val_irch_2[0] is None + assert val_irch_2[1][1, 1] == 1 + assert val_irch_2[2][1, 1] == 3 + assert val_irch_2[3] is None + val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) + assert val_rch[0] == 0.0 + assert val_rch[1] == 0.0004 + assert val_rch[2] == 0.00004 + assert val_rch[3] == 0.00004 + val_rch_2 = rcha.recharge.get_data() + assert val_rch_2[0] is None + assert val_rch_2[1][0, 0] == 0.0001 + assert val_rch_2[2][0, 0] == 0.00001 + assert val_rch_2[3] is None + aux_data_0 = rcha.aux.get_data(0) + assert aux_data_0 is None + aux_data_1 = rcha.aux.get_data(1) + assert aux_data_1[0][0][0] == 50.0 + aux_data_2 = rcha.aux.get_data(2) + assert aux_data_2 is None + aux_data_3 = rcha.aux.get_data(3) + assert aux_data_3[0][0][0] == 200.0 + + wel_array = wel.stress_period_data.array + assert wel_array[0] is None + assert wel_array[1][0][1] == 0.25 + assert wel_array[2][0][1] == 0.1 + assert wel_array[3][0][1] == 0.1 + + drn_array = drn.stress_period_data.array + assert drn_array[0][0][1] == 60.0 + assert drn_array[1][0][1] == 60.0 + assert drn_array[2] is None + assert drn_array[3][0][1] == 55.0 + drn_gd_0 = drn.stress_period_data.get_data(0) + assert drn_gd_0[0][1] == 60.0 + drn_gd_1 = drn.stress_period_data.get_data(1) + assert drn_gd_1 is None + drn_gd_2 = drn.stress_period_data.get_data(2) + assert len(drn_gd_2) == 0 + drn_gd_3 = drn.stress_period_data.get_data(3) + assert drn_gd_3[0][1] == 55.0 + + lak_tab_array = lak.tables.get_data() + assert lak_tab_array[0][1] == "lak01.tab" + assert lak_tab_array[1][1] == "lak02.tab" + + assert len(lak_tab) == 2 + lak_tab_1 = lak_tab[0].table.get_data() + assert lak_tab_1[0][0] == 30.0 + assert lak_tab_1[5][2] == 20000.0 + lak_tab_2 = lak_tab[1].table.get_data() + assert lak_tab_2[0][0] == 40.0 + assert lak_tab_2[4][1] == 503000.0 + + +@requires_exe("mf6") +def test_grid_array(function_tmpdir): + # get_data + # empty data in period block vs data repeating + # array + # aux values, test that they work the same as other arrays (is a value + # of zero always used even if aux is defined in a previous stress + # period?) 
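+    #
+    # sketch of the semantics the assertions below rely on (as observed in
+    # test_array above, not a documented API guarantee): .array repeats the
+    # last supplied stress period forward in time, while .get_data(kper)
+    # returns None for any period that has no block of its own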
+
+    sim_name = "test_grid_array"
+    model_name = "test_grid_array"
+    out_dir = function_tmpdir
+    tdis_name = f"{sim_name}.tdis"
+    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
+    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
+    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
+    ims_package = ModflowIms(
+        sim,
+        pname="my_ims_file",
+        filename=f"{sim_name}.ims",
+        print_option="ALL",
+        complexity="SIMPLE",
+        outer_dvclose=0.0001,
+        outer_maximum=50,
+        under_relaxation="NONE",
+        inner_maximum=30,
+        inner_dvclose=0.0001,
+        linear_acceleration="CG",
+        preconditioner_levels=7,
+        preconditioner_drop_tolerance=0.01,
+        number_orthogonalizations=2,
+    )
+    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
+
+    dis = ModflowGwfdis(
+        model,
+        length_units="FEET",
+        nlay=4,
+        nrow=2,
+        ncol=2,
+        delr=5000.0,
+        delc=5000.0,
+        top=100.0,
+        botm=[50.0, 0.0, -50.0, -100.0],
+        filename=f"{model_name} 1.dis",
+    )
+    ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
+    npf_package = ModflowGwfnpf(
+        model,
+        pname="npf_1",
+        save_flows=True,
+        alternative_cell_averaging="logarithmic",
+        icelltype=1,
+        k=50.0,
+    )
+
+    oc_package = ModflowGwfoc(
+        model,
+        budget_filerecord=[("test_array.cbc",)],
+        head_filerecord=[("test_array.hds",)],
+        saverecord={
+            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
+            1: [],
+        },
+        printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
+    )
+
+    aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]}
+    irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]}
+    rcha = ModflowGwfrcha(
+        model,
+        print_input=True,
+        print_flows=True,
+        auxiliary=[("var1", "var2")],
+        irch=irch,
+        recharge={1: 0.0001, 2: 0.00001},
+        aux=aux,
+    )
+    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
+    assert val_irch[0] == 4
+    assert val_irch[1] == 5
+    assert val_irch[2] == 6
+    assert val_irch[3] == 6
+    val_irch_2 = rcha.irch.get_data()
+    assert val_irch_2[0] is None
+    assert val_irch_2[1][1, 1] == 1
+    assert val_irch_2[2][1, 1] == 3
+    assert val_irch_2[3] is None
+    val_irch_2_3 = rcha.irch.get_data(3)
+    assert val_irch_2_3 is None
+    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
+    assert val_rch[0] == 0.0
+    assert val_rch[1] == 0.0004
+    assert val_rch[2] == 0.00004
+    assert val_rch[3] == 0.00004
+    val_rch_2 = rcha.recharge.get_data()
+    assert val_rch_2[0] is None
+    assert val_rch_2[1][0, 0] == 0.0001
+    assert val_rch_2[2][0, 0] == 0.00001
+    assert val_rch_2[3] is None
+    aux_data_0 = rcha.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = rcha.aux.get_data(1)
+    assert aux_data_1[0][0][0] == 50.0
+    aux_data_2 = rcha.aux.get_data(2)
+    assert aux_data_2 is None
+    aux_data_3 = rcha.aux.get_data(3)
+    assert aux_data_3[0][0][0] == 200.0
+
+    nlay = dis.nlay.get_data()
+    nrow = 
dis.nrow.get_data()
+    ncol = dis.ncol.get_data()
+
+    DNODATA = 3.0e30  # MF6 DNODATA constant
+    welqspd = {}
+    welconcspd = {}
+    welconc1spd = {}
+    for n in range(2):
+        q = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        if n == 0:
+            q[0, 0, 0] = 0.25
+            welconc[0, 0, 0] = 0.0
+            welaux2[0, 0, 0] = 9.0
+        elif n == 1:
+            q[0, 0, 0] = 0.1
+            welconc[0, 0, 0] = 0.0
+            welaux2[0, 0, 0] = 9.0
+        # period data start in the second stress period, so the first and
+        # last of the four periods have no block of their own
+        welqspd[n + 1] = q
+        welconcspd[n + 1] = [welconc, welaux2]
+        # single-aux view of the same data for the recreated package below
+        welconc1spd[n + 1] = [welconc]
+
+    # first create test package with multiple auxvars
+    wel = ModflowGwfwelg(
+        model,
+        print_input=True,
+        print_flows=True,
+        mover=True,
+        save_flows=False,
+        auxiliary=["var1", "var2"],
+        pname="WEL-1",
+        q=welqspd,
+        aux=welconcspd,
+    )
+
+    assert not wel.has_stress_period_data
+    # mask DNODATA before summing so only real pumping rates contribute
+    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
+    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
+    assert val_q[0] == 0.0
+    assert val_q[1] == 0.25
+    assert val_q[2] == 0.1
+    assert val_q[3] == 0.1
+    val_q_2 = wel.q.get_data()
+    assert val_q_2[0] is None
+    assert val_q_2[1][0, 0, 0] == 0.25
+    assert val_q_2[2][0, 0, 0] == 0.1
+    assert val_q_2[3] is None
+    aux_data_0 = wel.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = wel.aux.get_data(1)
+    assert aux_data_1[0][0, 0, 0] == 0.0
+    assert aux_data_1[1][0, 0, 0] == 9.0
+    aux_data_2 = wel.aux.get_data(2)
+    assert aux_data_2[0][0, 0, 0] == 0.0
+    assert aux_data_2[1][0, 0, 0] == 9.0
+    aux_data_3 = wel.aux.get_data(3)
+    assert aux_data_3 is None
+
+    # remove test wel package
+    wel.remove()
+
+    # create welg package with a single auxiliary variable
+    wel = ModflowGwfwelg(
+        model,
+        print_input=True,
+        print_flows=True,
+        mover=True,
+        save_flows=False,
+        auxiliary=["CONCENTRATION"],
+        pname="WEL-1",
+        q=welqspd,
+        aux=welconc1spd,
+    )
+
+    assert not wel.has_stress_period_data
+    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
+    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
+    assert val_q[0] == 0.0
+    assert val_q[1] == 0.25
+    assert val_q[2] == 0.1
+    assert val_q[3] == 0.1
+    val_q_2 = wel.q.get_data()
+    assert val_q_2[0] is None
+    assert val_q_2[1][0, 0, 0] == 0.25
+    assert val_q_2[2][0, 0, 0] == 0.1
+    assert val_q_2[3] is None
+    aux_data_0 = wel.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = wel.aux.get_data(1)
+    assert aux_data_1[0][0, 0, 0] == 0.0
+    aux_data_2 = wel.aux.get_data(2)
+    assert aux_data_2[0][0, 0, 0] == 0.0
+    aux_data_3 = wel.aux.get_data(3)
+    assert aux_data_3 is None
+
+    drnspdict = {
+        0: [[(0, 0, 0), 60.0, 10.0]],
+        2: [],
+        3: [[(0, 0, 0), 55.0, 5.0]],
+    }
+    drn = ModflowGwfdrn(
+        model,
+        print_input=True,
+        print_flows=True,
+        stress_period_data=drnspdict,
+        save_flows=False,
+        pname="DRN-1",
+    )
+    drn_array = drn.stress_period_data.array
+    assert drn_array[0][0][1] == 60.0
+    assert drn_array[1][0][1] == 60.0
+    assert drn_array[2] is None
+    assert drn_array[3][0][1] == 55.0
+    drn_gd_0 = drn.stress_period_data.get_data(0)
+    assert drn_gd_0[0][1] == 60.0
+    drn_gd_1 = drn.stress_period_data.get_data(1)
+    assert drn_gd_1 is None
+    drn_gd_2 = drn.stress_period_data.get_data(2)
+    assert len(drn_gd_2) == 0
+    drn_gd_3 = drn.stress_period_data.get_data(3)
+    assert drn_gd_3[0][1] == 55.0
+
+    ghbspdict = {
+        0: [[(0, 1, 1), 60.0, 10.0]],
+    }
+    ghb = ModflowGwfghb(
+        model,
+        print_input=True,
+        print_flows=True,
+        stress_period_data=ghbspdict,
+        save_flows=False,
+        pname="GHB-1",
+    )
+
+    lakpd = [(0, 70.0, 1), (1, 65.0, 1)]
+    lakecn = [
+        (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
+        (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
+    ]
+    lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")]
+    lak = ModflowGwflak(
+        model,
+        pname="lak",
+        print_input=True,
+        mover=True,
+        nlakes=2,
+        noutlets=0,
+        ntables=1,
+        packagedata=lakpd,
+        connectiondata=lakecn,
+        tables=lak_tables,
+    )
+
+    table_01 = [
+        (30.0, 100000.0, 10000.0),
+        (40.0, 200500.0, 10100.0),
+        (50.0, 301200.0, 10130.0),
+        (60.0, 402000.0, 10180.0),
+        (70.0, 503000.0, 10200.0),
+        (80.0, 700000.0, 20000.0),
+    ]
+    lak_tab = ModflowUtllaktab(
+        model,
+        filename="lak01.tab",
+        nrow=6,
+        ncol=3,
+        table=table_01,
+    )
+
+    table_02 = [
+        (40.0, 100000.0, 10000.0),
+        (50.0, 200500.0, 10100.0),
+        (60.0, 301200.0, 10130.0),
+        (70.0, 402000.0, 10180.0),
+        (80.0, 503000.0, 10200.0),
+        (90.0, 700000.0, 20000.0),
+    ]
+    lak_tab_2 = ModflowUtllaktab(
+        model,
+        filename="lak02.tab",
+        nrow=6,
+        ncol=3,
+        table=table_02,
+    )
+    wel_name_1 = wel.name[0]
+    lak_name_2 = lak.name[0]
+    package_data = [(wel_name_1,), (lak_name_2,)]
+    period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)]
+    fname = f"{model.name}.input.mvr"
+    mvr = ModflowGwfmvr(
+        parent_model_or_package=model,
+        filename=fname,
+        print_input=True,
+        print_flows=True,
+        maxpackages=2,
+        maxmvr=1,
+        packages=package_data,
+        perioddata=period_data,
+    )
+
+    # test writing and loading model
+    sim.write_simulation()
+    sim.run_simulation()
+
+    test_sim = MFSimulation.load(
+        sim_name,
+        "mf6",
+        "mf6",
+        out_dir,
+        write_headers=False,
+    )
+    model = test_sim.get_model()
+    dis = model.get_package("dis")
+    rcha = model.get_package("rcha")
+    wel = model.get_package("wel")
+    drn = model.get_package("drn")
+    lak = model.get_package("lak")
+    lak_tab = model.get_package("laktab")
+    assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis"
+    # do same tests as above
+    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
+    assert val_irch[0] == 4
+    assert val_irch[1] == 5
+    assert val_irch[2] == 6
+    assert val_irch[3] == 6
+    val_irch_2 = rcha.irch.get_data()
+    assert val_irch_2[0] is None
+    assert val_irch_2[1][1, 1] == 1
+    assert val_irch_2[2][1, 1] == 3
+    assert val_irch_2[3] is None
+    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
+    assert val_rch[0] == 0.0
+    assert val_rch[1] == 0.0004
+    assert val_rch[2] == 0.00004
+    assert val_rch[3] == 0.00004
+    val_rch_2 = rcha.recharge.get_data()
+    assert val_rch_2[0] is None
+    assert val_rch_2[1][0, 0] == 0.0001
+    assert val_rch_2[2][0, 0] == 0.00001
+    assert val_rch_2[3] is None
+    aux_data_0 = rcha.aux.get_data(0)
+    assert aux_data_0 is None
+    aux_data_1 = rcha.aux.get_data(1)
+    assert aux_data_1[0][0][0] == 50.0
+    aux_data_2 = rcha.aux.get_data(2)
+    assert aux_data_2 is None
+    aux_data_3 = rcha.aux.get_data(3)
+    assert aux_data_3[0][0][0] == 200.0
+
+    welg_q_per = wel.q.get_data()
+    assert welg_q_per[0] is None
+    assert welg_q_per[1][0, 0, 0] == 0.25
+    assert welg_q_per[2][0, 0, 0] == 0.1
+    assert welg_q_per[3] is None
+
+    welg_q_per1 = wel.q.get_data(1)
+    assert welg_q_per1[0, 0, 0] == 0.25
+    welg_aux_per1 = wel.aux.get_data(1)
+    assert welg_aux_per1[0][0, 0, 0] == 0.0
+
+    drn_array = drn.stress_period_data.array
+    assert drn_array[0][0][1] == 60.0
+    assert drn_array[1][0][1] == 60.0
+    assert drn_array[2] is None
+    assert drn_array[3][0][1] == 55.0
+    drn_gd_0 = drn.stress_period_data.get_data(0)
+    assert drn_gd_0[0][1] == 60.0
+    drn_gd_1 = drn.stress_period_data.get_data(1)
+    assert drn_gd_1 is None
+    drn_gd_2 = drn.stress_period_data.get_data(2)
+    assert len(drn_gd_2) == 0
+    drn_gd_3 = drn.stress_period_data.get_data(3)
+    assert drn_gd_3[0][1] == 55.0
+
+    lak_tab_array = lak.tables.get_data()
+    assert lak_tab_array[0][1] == "lak01.tab"
+    assert lak_tab_array[1][1] == "lak02.tab"
+
+    assert len(lak_tab) == 2
+    lak_tab_1 = lak_tab[0].table.get_data()
+    assert lak_tab_1[0][0] 
== 30.0 + assert lak_tab_1[5][2] == 20000.0 + lak_tab_2 = lak_tab[1].table.get_data() + assert lak_tab_2[0][0] == 40.0 + assert lak_tab_2[4][1] == 503000.0 + + +@requires_exe("mf6") +def test_multi_model(function_tmpdir): + # init paths + test_ex_name = "test_multi_model" + model_names = ["gwf_model_1", "gwf_model_2", "gwt_model_1", "gwt_model_2"] + + # temporal discretization + nper = 1 + perlen = [5.0] + nstp = [200] + tsmult = [1.0] + tdis_rc = [] + for i in range(nper): + tdis_rc.append((perlen[i], nstp[i], tsmult[i])) + + # build MODFLOW 6 files + sim = MFSimulation( + sim_name=test_ex_name, + version="mf6", + exe_name="mf6", + sim_ws=str(function_tmpdir), + ) + # create tdis package + tdis = ModflowTdis( + sim, time_units="DAYS", nper=nper, perioddata=tdis_rc, pname="sim.tdis" + ) + + # grid information + nlay, nrow, ncol = 1, 1, 50 + + # Create gwf1 model + welspd = {0: [[(0, 0, 0), 1.0, 1.0]]} + chdspd = None + gwf1 = get_gwf_model( + sim, + model_names[0], + model_names[0], + (nlay, nrow, ncol), + chdspd=chdspd, + welspd=welspd, + ) + + # Create gwf2 model + welspd = {0: [[(0, 0, 1), 0.5, 0.5]]} + chdspd = {0: [[(0, 0, ncol - 1), 0.0000000]]} + gwf2 = get_gwf_model( + sim, + model_names[1], + model_names[1], + (nlay, nrow, ncol), + chdspd=chdspd, + welspd=welspd, + ) + lakpd = [(0, -100.0, 1)] + lakecn = [(0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 0.1, 1.0, 10.0, 1.0)] + lak_2 = ModflowGwflak( + gwf2, + pname="lak2", + print_input=True, + mover=True, + nlakes=1, + noutlets=0, + ntables=0, + packagedata=lakpd, + connectiondata=lakecn, + ) + + # gwf-gwf + gwfgwf_data = [] + for col in range(0, ncol): + gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0]) + gwfgwf = ModflowGwfgwf( + sim, + exgtype="GWF6-GWF6", + nexg=len(gwfgwf_data), + exgmnamea=gwf1.name, + exgmnameb=gwf2.name, + exchangedata=gwfgwf_data, + auxiliary=["ANGLDEGX", "CDIST"], + filename="flow1_flow2.gwfgwf", + ) + # set up mvr package + wel_1 = gwf1.get_package("wel") + wel_1.mover.set_data(True) + wel_name_1 = wel_1.name[0] + lak_name_2 = lak_2.name[0] + package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)] + period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)] + fname = "gwfgwf.input.mvr" + gwfgwf.mvr.initialize( + filename=fname, + modelnames=True, + print_input=True, + print_flows=True, + maxpackages=2, + maxmvr=1, + packages=package_data, + perioddata=period_data, + ) + + gnc_data = [] + for col in range(0, ncol): + if col < ncol / 2.0: + gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col + 1), 0.25)) + else: + gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col - 1), 0.25)) + + # set up gnc package + fname = "gwfgwf.input.gnc" + gwfgwf.gnc.initialize( + filename=fname, + print_input=True, + print_flows=True, + numgnc=ncol, + numalphaj=1, + gncdata=gnc_data, + ) + + # Observe flow for exchange + gwfgwfobs = {} + obs_list = [] + for col in range(0, ncol): + obs_list.append([f"exchange_flow_{col}", "FLOW-JA-FACE", (col,)]) + gwfgwfobs["gwfgwf.output.obs.csv"] = obs_list + fname = "gwfgwf.input.obs" + gwfgwf.obs.initialize( + filename=fname, digits=25, print_input=True, continuous=gwfgwfobs + ) + + # Create gwt model + sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")] + gwt = get_gwt_model( + sim, + model_names[2], + model_names[2], + (nlay, nrow, ncol), + sourcerecarray=sourcerecarray, + ) + + # GWF GWT exchange + gwfgwt = ModflowGwfgwt( + sim, + exgtype="GWF6-GWT6", + exgmnamea=model_names[0], + exgmnameb=model_names[2], + 
filename="flow1_transport1.gwfgwt", + ) + + # solver settings + nouter, ninner = 100, 300 + hclose, rclose, relax = 1e-6, 1e-6, 1.0 + + # create iterative model solution and register the gwf model with it + imsgwf = ModflowIms( + sim, + print_option="SUMMARY", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="NONE", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + filename="flow.ims", + ) + + # create iterative model solution and register the gwt model with it + imsgwt = ModflowIms( + sim, + print_option="SUMMARY", + outer_dvclose=hclose, + outer_maximum=nouter, + under_relaxation="NONE", + inner_maximum=ninner, + inner_dvclose=hclose, + rcloserecord=rclose, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=relax, + filename="transport.ims", + ) + sim.register_ims_package(imsgwt, [gwt.name]) + + sim.write_simulation() + sim.run_simulation() + + # reload simulation + sim2 = MFSimulation.load(sim_ws=str(function_tmpdir)) + + # check ims registration + solution_recarray = sim2.name_file.solutiongroup + for solution_group_num in solution_recarray.get_active_key_list(): + rec_array = solution_recarray.get_data(solution_group_num[0]) + assert rec_array[0][1] == "flow.ims" + assert rec_array[0][2] == model_names[0] + assert rec_array[0][3] == model_names[1] + assert rec_array[1][1] == "transport.ims" + assert rec_array[1][2] == model_names[2] + assert gwf1.get_ims_package() is gwf2.get_ims_package() + assert gwf1.get_ims_package().filename == "flow.ims" + assert gwt.get_ims_package().filename == "transport.ims" + # test ssm fileinput + gwt2 = sim2.get_model("gwt_model_1") + ssm2 = gwt2.get_package("ssm") + fileinput = [ + ("RCH-1", "gwt_model_1.rch1.spc"), + ("RCH-2", "gwt_model_1.rch2.spc"), + ("RCH-3", "gwt_model_1.rch3.spc", "MIXED"), + ("RCH-4", "gwt_model_1.rch4.spc"), + ] + ssm2.fileinput = fileinput + fi_out = ssm2.fileinput.get_data() + assert fi_out[2][1] == "gwt_model_1.rch3.spc" + assert fi_out[1][2] is None + assert fi_out[2][2] == "MIXED" + + spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True) + spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False) + spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True) + spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True) + + # test writing and loading spca packages + sim2.write_simulation() + sim3 = MFSimulation.load(sim_ws=sim2.sim_path) + gwt3 = sim3.get_model("gwt_model_1") + spc1 = gwt3.get_package("gwt_model_1.rch1.spc") + assert isinstance(spc1, ModflowUtlspca) + assert spc1.print_input.get_data() is True + spc2 = gwt3.get_package("gwt_model_1.rch2.spc") + assert spc2.print_input.get_data() is not True + + # create a new gwt model + sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")] + gwt_2 = get_gwt_model( + sim, + model_names[3], + model_names[3], + (nlay, nrow, ncol), + sourcerecarray=sourcerecarray, + ) + # register gwt model with transport.ims + sim.register_ims_package(imsgwt, gwt_2.name) + # flow and transport exchange + gwfgwt = ModflowGwfgwt( + sim, + exgtype="GWF6-GWT6", + exgmnamea=model_names[1], + exgmnameb=model_names[3], + filename="flow2_transport2.gwfgwt", + ) + # save and run updated model + sim.write_simulation() + sim.run_simulation() + + with pytest.raises( + flopy.mf6.mfbase.FlopyException, + 
match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.', + ): + # test kwargs error checking + wel = ModflowGwfwel( + gwf2, + print_input=True, + print_flows=True, + stress_period_data=welspd, + save_flows=False, + auxiliary="CONCENTRATION", + pname="WEL-1", + param_does_not_exist=True, + ) + + +@requires_exe("mf6") +def test_namefile_creation(function_tmpdir): + test_ex_name = "test_namefile" + # build MODFLOW 6 files + sim = MFSimulation( + sim_name=test_ex_name, + version="mf6", + exe_name="mf6", + sim_ws=str(function_tmpdir), + ) + + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] + tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) + ims_package = ModflowIms( + sim, + pname="my_ims_file", + filename=f"{test_ex_name}.ims", + print_option="ALL", + complexity="SIMPLE", + outer_dvclose=0.0001, + outer_maximum=50, + under_relaxation="NONE", + inner_maximum=30, + inner_dvclose=0.0001, + linear_acceleration="CG", + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2, + ) + model = ModflowGwf( + sim, + modelname=test_ex_name, + model_nam_file=f"{test_ex_name}.nam", + ) + + # try to create simulation name file + ex_happened = False + try: + nam = ModflowNam(sim) + except flopy.mf6.mfbase.FlopyException: + ex_happened = True + assert ex_happened + + # try to create model name file + ex_happened = False + try: + nam = ModflowGwfnam(model) + except flopy.mf6.mfbase.FlopyException: + ex_happened = True + assert ex_happened + + +def test_remove_model(function_tmpdir, example_data_path): + # load a multi-model simulation + sim_ws = str(example_data_path / "mf6" / "test006_2models_mvr") + sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") + + # original simulation should contain models: + # - 'parent', with files named 'model1.ext' + # - 'child', with files named 'model2.ext' + assert len(sim.model_names) == 2 + assert "parent" in sim.model_names + assert "child" in sim.model_names + + # remove the child model + sim.remove_model("child") + + # simulation should now only contain the parent model + assert len(sim.model_names) == 1 + assert "parent" in sim.model_names + + # write simulation input files + sim.set_sim_path(function_tmpdir) + sim.write_simulation() + + # there should be no input files for the child model + files = list(function_tmpdir.glob("*")) + assert not any("model2" in f.name for f in files) + + # there should be no model or solver entry for the child model + # in the simulation namefile + lines = open(function_tmpdir / "mfsim.nam").readlines() + lines = [l.lower().strip() for l in lines] + assert not any("model2" in l for l in lines) + assert not any("child" in l for l in lines) + + # there should be no exchanges either + exg_index = 0 + for i, l in enumerate(lines): + if "begin exchanges" in l: + exg_index = i + elif exg_index > 0: + assert "end exchanges" in l + break + + +@requires_pkg("shapely") +@requires_exe("triangle") +def test_flopy_2283(function_tmpdir): + # create triangular grid + triangle_ws = function_tmpdir / "triangle" + triangle_ws.mkdir() + + active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)] + tri = Triangle(model_ws=triangle_ws, angle=30) + tri.add_polygon(active_area) + tri.add_region((1, 1), maximum_area=50**2) + + tri.build() + + # build vertex grid object + vgrid = flopy.discretization.VertexGrid( + vertices=tri.get_vertices(), + cell2d=tri.get_cell2d(), + xoff=199000, + yoff=215500, + crs=31370, + angrot=30, + ) + + # coord info is set (also correct 
when using vgrid.set_coord_info() + print(vgrid) + + # create MODFLOW 6 model + ws = function_tmpdir / "model" + ws.mkdir() + sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws) + tdis = flopy.mf6.ModflowTdis(sim) + ims = flopy.mf6.ModflowIms(sim) + + gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf") + disv = flopy.mf6.ModflowGwfdisv( + gwf, + xorigin=vgrid.xoffset, + yorigin=vgrid.yoffset, + angrot=vgrid.angrot, # no CRS info can be set in DISV + nlay=1, + top=0.0, + botm=-10.0, + ncpl=vgrid.ncpl, + nvert=vgrid.nvert, + cell2d=vgrid.cell2d, + vertices=tri.get_vertices(), # this is not stored in the Vertex grid object? + ) + + assert gwf.modelgrid.xoffset == disv.xorigin.get_data() + assert gwf.modelgrid.yoffset == disv.yorigin.get_data() + assert gwf.modelgrid.angrot == disv.angrot.get_data() From 9a1bdc84a7a27f29157cf1a5eb25c61057011bc3 Mon Sep 17 00:00:00 2001 From: mjreno Date: Wed, 27 Aug 2025 07:54:50 -0400 Subject: [PATCH 28/44] remove unintended files --- autotest/tmp/test_mf6.1.py | 2937 ------------------------------------ autotest/tmp/test_mf6.py | 2930 ----------------------------------- 2 files changed, 5867 deletions(-) delete mode 100644 autotest/tmp/test_mf6.1.py delete mode 100644 autotest/tmp/test_mf6.py diff --git a/autotest/tmp/test_mf6.1.py b/autotest/tmp/test_mf6.1.py deleted file mode 100644 index b1b91b95b7..0000000000 --- a/autotest/tmp/test_mf6.1.py +++ /dev/null @@ -1,2937 +0,0 @@ -import os -import platform -from pathlib import Path -from shutil import copytree, which - -import numpy as np -import pytest -from modflow_devtools.markers import requires_exe, requires_pkg -from modflow_devtools.misc import set_dir - -import flopy -from flopy.mf6 import ( - MFModel, - MFSimulation, - ModflowGwf, - ModflowGwfchd, - ModflowGwfdis, - ModflowGwfdisu, - ModflowGwfdisv, - ModflowGwfdrn, - ModflowGwfevt, - ModflowGwfevta, - ModflowGwfghb, - ModflowGwfgnc, - ModflowGwfgwf, - ModflowGwfgwt, - ModflowGwfhfb, - ModflowGwfic, - ModflowGwflak, - ModflowGwfmaw, - ModflowGwfmvr, - ModflowGwfnam, - ModflowGwfnpf, - ModflowGwfoc, - ModflowGwfrch, - ModflowGwfrcha, - ModflowGwfriv, - ModflowGwfsfr, - ModflowGwfsto, - ModflowGwfuzf, - ModflowGwfwel, - ModflowGwfwelg, - ModflowGwtadv, - ModflowGwtdis, - ModflowGwtic, - ModflowGwtmst, - ModflowGwtoc, - ModflowGwtssm, - ModflowIms, - ModflowNam, - ModflowTdis, - ModflowUtllaktab, - ModflowUtlspca, -) -from flopy.mf6.coordinates.modeldimensions import ( - DataDimensions, - ModelDimensions, - PackageDimensions, -) -from flopy.mf6.data.mffileaccess import MFFileAccessArray -from flopy.mf6.data.mfstructure import MFDataItemStructure, MFDataStructure -from flopy.mf6.mfsimbase import MFSimulationData -from flopy.mf6.modflow import ( - mfgwf, - mfgwfdis, - mfgwfdrn, - mfgwfic, - mfgwfnpf, - mfgwfoc, - mfgwfriv, - mfgwfsto, - mfgwfwel, - mfims, - mftdis, -) -from flopy.utils import CellBudgetFile, HeadFile, Mf6ListBudget, Mf6Obs, ZoneBudget6 -from flopy.utils.observationfile import CsvFile -from flopy.utils.triangle import Triangle -from flopy.utils.voronoi import VoronoiGrid - -pytestmark = pytest.mark.mf6 - - -def write_head( - fbin, - data, - kstp=1, - kper=1, - pertim=1.0, - totim=1.0, - text=" HEAD", - ilay=1, -): - dt = np.dtype( - [ - ("kstp", "i4"), - ("kper", "i4"), - ("pertim", "f8"), - ("totim", "f8"), - ("text", "S16"), - ("ncol", "i4"), - ("nrow", "i4"), - ("ilay", "i4"), - ] - ) - nrow = data.shape[0] - ncol = data.shape[1] - h = np.array((kstp, kper, pertim, totim, text, ncol, nrow, ilay), dtype=dt) - h.tofile(fbin) - 
data.tofile(fbin) - - -def get_gwf_model(sim, gwfname, gwfpath, modelshape, chdspd=None, welspd=None): - nlay, nrow, ncol = modelshape - delr = 1.0 - delc = 1.0 - top = 1.0 - botm = [0.0] - strt = 1.0 - hk = 1.0 - laytyp = 0 - - gwf = ModflowGwf( - sim, - modelname=gwfname, - save_flows=True, - ) - gwf.set_model_relative_path(gwfpath) - - dis = ModflowGwfdis( - gwf, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - # initial conditions - ic = ModflowGwfic(gwf, strt=strt) - - # node property flow - npf = ModflowGwfnpf( - gwf, - icelltype=laytyp, - k=hk, - save_specific_discharge=True, - ) - - # chd files - if chdspd is not None: - chd = ModflowGwfchd( - gwf, - stress_period_data=chdspd, - save_flows=False, - pname="CHD-1", - ) - - # wel files - if welspd is not None: - wel = ModflowGwfwel( - gwf, - print_input=True, - print_flows=True, - stress_period_data=welspd, - save_flows=False, - auxiliary="CONCENTRATION", - pname="WEL-1", - ) - - # output control - oc = ModflowGwfoc( - gwf, - budget_filerecord=f"{gwfname}.cbc", - head_filerecord=f"{gwfname}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], - printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], - ) - return gwf - - -def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None): - nlay, nrow, ncol = modelshape - delr = 1.0 - delc = 1.0 - top = 1.0 - botm = [0.0] - strt = 1.0 - hk = 1.0 - laytyp = 0 - - gwt = MFModel( - sim, - model_type="gwt6", - modelname=gwtname, - model_rel_path=gwtpath, - ) - gwt.name_file.save_flows = True - - dis = ModflowGwtdis( - gwt, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - # initial conditions - ic = ModflowGwtic(gwt, strt=0.0) - - # advection - adv = ModflowGwtadv(gwt, scheme="upstream") - - # mass storage and transfer - mst = ModflowGwtmst(gwt, porosity=0.1) - - # sources - ssm = ModflowGwtssm(gwt, sources=sourcerecarray) - - # output control - oc = ModflowGwtoc( - gwt, - budget_filerecord=f"{gwtname}.cbc", - concentration_filerecord=f"{gwtname}.ucn", - concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], - printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], - ) - return gwt - - -def to_win_sep(s): - return s.replace("/", "\\") - - -def to_posix_sep(s): - return s.replace("\\", "/") - - -def to_os_sep(s): - return s.replace("\\", os.sep).replace("/", os.sep) - - -@requires_exe("mf6") -def test_load_and_run_sim_when_namefile_uses_filenames( - function_tmpdir, example_data_path -): - # copy model input files to temp workspace - model_name = "mf6-freyberg" - workspace = function_tmpdir / model_name - copytree(example_data_path / model_name, workspace) - - # load, check and run simulation - sim = MFSimulation.load(sim_ws=workspace) - sim.check() - success, _ = sim.run_simulation(report=True) - assert success - - -@requires_exe("mf6") -def test_load_and_run_sim_when_namefile_uses_abs_paths( - function_tmpdir, example_data_path -): - # copy model input files to temp workspace - model_name = "freyberg" - workspace = function_tmpdir / "ws" - copytree(example_data_path / f"mf6-{model_name}", workspace) - - # sub abs paths into namefile - with set_dir(workspace): - nam_path = workspace / "mfsim.nam" - lines = open(nam_path).readlines() - with open(nam_path, "w") as f: - for l in lines: - pattern = f"{model_name}." 
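-                # any namefile entry that references a model input file is
-                # rewritten below with the absolute workspace path prefixed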
- if pattern in l: - l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern) - f.write(l) - - # load, check and run simulation - sim = MFSimulation.load(sim_ws=workspace) - sim.check() - success, _ = sim.run_simulation(report=True) - assert success - - -@requires_exe("mf6") -@pytest.mark.parametrize("sep", ["win", "posix"]) -def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep): - # copy model input files to temp workspace - model_name = "freyberg" - workspace = function_tmpdir / "ws" - copytree(example_data_path / f"mf6-{model_name}", workspace) - - # sub rel paths into namefile - with set_dir(workspace): - nam_path = workspace / "mfsim.nam" - lines = open(nam_path).readlines() - with open(nam_path, "w") as f: - for l in lines: - pattern = f"{model_name}." - if pattern in l: - if sep == "win": - l = to_win_sep( - l.replace( - pattern, "../" + workspace.name + "/" + model_name + "." - ) - ) - else: - l = to_posix_sep( - l.replace( - pattern, "../" + workspace.name + "/" + model_name + "." - ) - ) - f.write(l) - - # load and check simulation - sim = MFSimulation.load(sim_ws=workspace) - sim.check() - - # don't run simulation with Windows sep on Linux or Mac - if sep == "win" and platform.system() != "Windows": - return - - # run simulation - success, _ = sim.run_simulation(report=True) - assert success - - -@pytest.mark.skip(reason="currently flopy uses OS-specific path separators") -@pytest.mark.parametrize("sep", ["win", "posix"]) -def test_write_simulation_always_writes_posix_path_separators( - function_tmpdir, example_data_path, sep -): - # copy model input files to temp workspace - model_name = "freyberg" - workspace = function_tmpdir / "ws" - copytree(example_data_path / f"mf6-{model_name}", workspace) - - # use OS-specific path separators - with set_dir(workspace): - nam_path = workspace / "mfsim.nam" - lines = open(nam_path).readlines() - with open(nam_path, "w") as f: - for l in lines: - pattern = f"{model_name}." - if pattern in l: - if sep == "win": - l = to_win_sep( - l.replace( - pattern, "../" + workspace.name + "/" + model_name + "." - ) - ) - else: - l = to_posix_sep( - l.replace( - pattern, "../" + workspace.name + "/" + model_name + "." - ) - ) - f.write(l) - - # load and write simulation - sim = MFSimulation.load(sim_ws=workspace) - sim.write_simulation() - - # make sure posix separators were written - lines = open(workspace / "mfsim.nam").readlines() - assert all("\\" not in l for l in lines) - - -@requires_exe("mf6") -@pytest.mark.parametrize("filename", ["name", "rel", "rel_win"]) -def test_basic_gwf(function_tmpdir, filename): - ws = function_tmpdir - name = "basic_gwf_prep" - sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6") - pd = [(1.0, 1, 1.0), (1.0, 1, 1.0)] - - innerdir = Path(function_tmpdir / "inner") - innerdir.mkdir() - - # mfpackage filename can be path or string.. - # if string, it can either be a file name or - # path relative to the simulation workspace. 
-    tdis_name = f"{name}.tdis"
-    tdis_path = innerdir / tdis_name
-    tdis_path.touch()
-    tdis_relpath = tdis_path.relative_to(ws).as_posix()
-    tdis_relpath_win = str(tdis_relpath).replace("/", "\\")
-
-    if filename == "name":
-        # file named with no path will be created in simulation workspace
-        tdis = flopy.mf6.ModflowTdis(
-            sim, nper=len(pd), perioddata=pd, filename=tdis_name
-        )
-        assert tdis.filename == tdis_name
-    elif filename == "rel":
-        # filename may be a relative pathlib.Path
-        tdis = flopy.mf6.ModflowTdis(
-            sim, nper=len(pd), perioddata=pd, filename=tdis_relpath
-        )
-        assert tdis.filename == str(tdis_relpath)
-
-        # relative paths may also be provided as strings
-        tdis = flopy.mf6.ModflowTdis(
-            sim, nper=len(pd), perioddata=pd, filename=str(tdis_relpath)
-        )
-        assert tdis.filename == str(tdis_relpath)
-    elif filename == "rel_win":
-        # Windows path backslash separator should be converted to forward slash
-        tdis = flopy.mf6.ModflowTdis(
-            sim, nper=len(pd), perioddata=pd, filename=tdis_relpath_win
-        )
-        assert tdis.filename == str(tdis_relpath)
-
-    # create other packages
-    ims = flopy.mf6.ModflowIms(sim)
-    gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
-    dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10)
-    ic = flopy.mf6.ModflowGwfic(gwf)
-    npf = flopy.mf6.ModflowGwfnpf(
-        gwf, save_specific_discharge=True, save_saturation=True
-    )
-    spd = {
-        0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]],
-        1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]],
-    }
-    chd = flopy.mf6.ModflowGwfchd(
-        gwf, pname="CHD-1", stress_period_data=spd, auxiliary=["concentration"]
-    )
-    budget_file = f"{name}.bud"
-    head_file = f"{name}.hds"
-    oc = flopy.mf6.ModflowGwfoc(
-        gwf,
-        budget_filerecord=budget_file,
-        head_filerecord=head_file,
-        saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
-    )
-
-    # write the simulation
-    sim.write_simulation()
-
-    # check for input files
-    assert (ws / innerdir / tdis_name).is_file()
-    assert (ws / f"{name}.ims").is_file()
-    assert (ws / f"{name}.dis").is_file()
-    assert (ws / f"{name}.ic").is_file()
-    assert (ws / f"{name}.npf").is_file()
-    assert (ws / f"{name}.chd").is_file()
-    assert (ws / f"{name}.oc").is_file()
-
-    # run the simulation
-    sim.run_simulation()
-
-    # check for output files
-    assert (ws / budget_file).is_file()
-    assert (ws / head_file).is_file()
-
-
-def test_subdir(function_tmpdir):
-    sim = MFSimulation(sim_ws=function_tmpdir)
-    assert sim.sim_path == function_tmpdir
-
-    tdis = ModflowTdis(sim)
-    gwf = ModflowGwf(sim, model_rel_path="level2")
-    ims = ModflowIms(sim)
-    sim.register_ims_package(ims, [])
-    dis = ModflowGwfdis(gwf)
-    sim.set_all_data_external(external_data_folder="dat")
-    sim.write_simulation()
-
-    sim_r = MFSimulation.load(
-        "mfsim.nam",
-        sim_ws=sim.simulation_data.mfpath.get_sim_path(),
-    )
-    gwf_r = sim_r.get_model()
-    assert gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry(), (
-        "Something wrong with model external paths"
-    )
-
-    sim_r.set_all_data_internal()
-    sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
-    sim_r.write_simulation()
-
-    sim_r2 = MFSimulation.load(
-        "mfsim.nam",
-        sim_ws=sim_r.simulation_data.mfpath.get_sim_path(),
-    )
-    gwf_r2 = sim_r2.get_model()
-    assert gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry(), (
-        "Something wrong with model external paths after reload"
-    )
-
-
-@requires_exe("mf6")
-@pytest.mark.parametrize("layered", [True, False])
-def test_binary_write(function_tmpdir, layered):
-    nlay, nrow, ncol = 2, 1, 10
-    shape2d = (nrow,
ncol) - - # data for layers - botm = [4.0, 0.0] - strt = [5.0, 10.0] - - # create binary data structured - if layered: - idomain_data = [] - botm_data = [] - strt_data = [] - for k in range(nlay): - idomain_data.append( - { - "factor": 1.0, - "filename": f"idomain_l{k + 1}.bin", - "data": 1, - "binary": True, - "iprn": 1, - } - ) - botm_data.append( - { - "filename": f"botm_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(shape2d, botm[k], dtype=float), - } - ) - strt_data.append( - { - "filename": f"strt_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(shape2d, strt[k], dtype=float), - } - ) - else: - idomain_data = { - "filename": "idomain.bin", - "binary": True, - "iprn": 1, - "data": 1, - } - botm_data = { - "filename": "botm.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(shape2d, botm[0], dtype=float), - np.full(shape2d, botm[1], dtype=float), - ] - ), - } - strt_data = { - "filename": "strt.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(shape2d, strt[0], dtype=float), - np.full(shape2d, strt[1], dtype=float), - ] - ), - } - - # binary data that does not vary by layers - top_data = { - "filename": "top.bin", - "binary": True, - "iprn": 1, - "data": 10.0, - } - rch_data = { - 0: { - "filename": "recharge.bin", - "binary": True, - "iprn": 1, - "data": 0.000001, - }, - } - chd_data = [ - (1, 0, 0, 10.0, 1.0, 100.0), - (1, 0, ncol - 1, 5.0, 0.0, 100.0), - ] - chd_data = { - 0: { - "filename": "chd.bin", - "binary": True, - "iprn": 1, - "data": chd_data, - }, - } - - sim = MFSimulation(sim_ws=str(function_tmpdir)) - ModflowTdis(sim) - ModflowIms(sim, complexity="simple") - gwf = ModflowGwf(sim, print_input=True) - ModflowGwfdis( - gwf, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=1.0, - delc=1.0, - top=top_data, - botm=botm_data, - idomain=idomain_data, - ) - ModflowGwfnpf( - gwf, - icelltype=1, - ) - ModflowGwfic( - gwf, - strt=strt_data, - ) - ModflowGwfchd( - gwf, - auxiliary=["conc", "something"], - stress_period_data=chd_data, - ) - ModflowGwfrcha(gwf, recharge=rch_data) - - sim.write_simulation() - success, buff = sim.run_simulation() - assert success - - -@requires_exe("mf6") -@requires_pkg("shapely", "scipy") -@pytest.mark.parametrize("layered", [True, False]) -def test_vor_binary_write(function_tmpdir, layered): - # build voronoi grid - boundary = [(0.0, 0.0), (0.0, 1.0), (10.0, 1.0), (10.0, 0.0)] - triangle_ws = function_tmpdir / "triangle" - triangle_ws.mkdir(parents=True, exist_ok=True) - - tri = Triangle( - angle=30, - maximum_area=1.0, - model_ws=triangle_ws, - ) - tri.add_polygon(boundary) - tri.build(verbose=False) - vor = VoronoiGrid(tri) - - # problem dimensions - nlay = 2 - - # data for layers - botm = [4.0, 0.0] - strt = [5.0, 10.0] - - # build binary data - if layered: - idomain_data = [] - botm_data = [] - strt_data = [] - for k in range(nlay): - idomain_data.append( - { - "factor": 1.0, - "filename": f"idomain_l{k + 1}.bin", - "data": 1, - "binary": True, - "iprn": 1, - } - ) - botm_data.append( - { - "filename": f"botm_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, botm[k], dtype=float), - } - ) - strt_data.append( - { - "filename": f"strt_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, strt[k], dtype=float), - } - ) - else: - idomain_data = { - "filename": "idomain.bin", - "binary": True, - "iprn": 1, - "data": 1, - } - botm_data = { - "filename": "botm.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(vor.ncpl, 
botm[0], dtype=float), - np.full(vor.ncpl, botm[1], dtype=float), - ] - ), - } - strt_data = { - "filename": "strt.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(vor.ncpl, strt[0], dtype=float), - np.full(vor.ncpl, strt[1], dtype=float), - ] - ), - } - - # binary data that does not vary by layers - top_data = { - "filename": "top.bin", - "binary": True, - "iprn": 1, - "data": 10.0, - } - rch_data = { - 0: { - "filename": "recharge.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, 0.000001, dtype=float), - }, - } - chd_data = [ - (1, 0, 10.0, 1.0, 100.0), - (1, 1, 10.0, 1.0, 100.0), - (1, 2, 5.0, 0.0, 100.0), - (1, 3, 5.0, 0.0, 100.0), - ] - chd_data = { - 0: { - "filename": "chd.bin", - "binary": True, - "data": chd_data, - }, - } - - # build model - sim = MFSimulation(sim_ws=str(function_tmpdir)) - ModflowTdis(sim) - ModflowIms(sim, complexity="simple") - gwf = ModflowGwf(sim, print_input=True) - flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=vor.ncpl, - nvert=vor.nverts, - vertices=vor.get_disv_gridprops()["vertices"], - cell2d=vor.get_disv_gridprops()["cell2d"], - top=top_data, - botm=botm_data, - idomain=idomain_data, - xorigin=0.0, - yorigin=0.0, - ) - ModflowGwfnpf( - gwf, - icelltype=1, - ) - ModflowGwfic( - gwf, - strt=strt_data, - ) - ModflowGwfrcha(gwf, recharge=rch_data) - ModflowGwfchd( - gwf, - auxiliary=["conc", "something"], - stress_period_data=chd_data, - ) - sim.write_simulation() - success, buff = sim.run_simulation() - assert success - - -def test_binary_read(function_tmpdir): - test_ex_name = "binary_read" - nlay = 3 - nrow = 10 - ncol = 10 - - modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol) - - arr = np.arange(nlay * nrow * ncol).astype(np.float64) - data_shape = (nlay, nrow, ncol) - data_size = nlay * nrow * ncol - arr.shape = data_shape - - sim_data = MFSimulationData("integration", None) - dstruct = MFDataItemStructure() - dstruct.is_cellid = False - dstruct.name = "fake" - dstruct.data_items = [ - None, - ] - mfstruct = MFDataStructure(dstruct, False, "ic", None) - mfstruct.data_item_structures = [ - dstruct, - ] - mfstruct.path = [ - "fake", - ] - - md = ModelDimensions("test", None) - pd = PackageDimensions([md], None, "integration") - dd = DataDimensions(pd, mfstruct) - - binfile = function_tmpdir / "structured_layered.hds" - with open(binfile, "wb") as foo: - for ix, a in enumerate(arr): - write_head(foo, a, ilay=ix) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - # test path as both Path and str - for bf in [binfile, str(binfile)]: - arr2 = fa.read_binary_data_from_file( - bf, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), ( - "Binary read for layered structured failed with " - + ("Path" if isinstance(binfile, Path) else "str") - ) - - binfile = function_tmpdir / "structured_flat.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(np.ravel(arr), axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for flat Structured failed" - - ncpl = nrow * ncol - data_shape = (nlay, ncpl) - arr.shape = data_shape - modelgrid = flopy.discretization.VertexGrid(nlay=nlay, ncpl=ncpl) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - binfile = function_tmpdir / "vertex_layered.hds" - with open(binfile, "wb") as foo: - tarr = arr.reshape((nlay, 1, ncpl)) - for ix, a in 
enumerate(tarr): - write_head(foo, a, ilay=ix) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for layered Vertex failed" - - binfile = function_tmpdir / "vertex_flat.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(np.ravel(arr), axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for flat Vertex failed" - - nlay = 3 - ncpl = [50, 100, 150] - data_shape = (np.sum(ncpl),) - arr.shape = data_shape - modelgrid = flopy.discretization.UnstructuredGrid(ncpl=ncpl) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - binfile = function_tmpdir / "unstructured.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(arr, axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for Unstructured failed" - - -@requires_exe("mf6") -def test_props_and_write(function_tmpdir): - # workspace as str - sim = MFSimulation(sim_ws=str(function_tmpdir)) - assert isinstance(sim, MFSimulation) - assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path - - # workspace as Path - sim = MFSimulation(sim_ws=function_tmpdir) - assert isinstance(sim, MFSimulation) - assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path - - tdis = ModflowTdis(sim) - assert isinstance(tdis, ModflowTdis) - - gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2") - assert isinstance(gwfgwf, ModflowGwfgwf) - - gwf = ModflowGwf(sim) - assert isinstance(gwf, ModflowGwf) - - ims = ModflowIms(sim) - assert isinstance(ims, ModflowIms) - sim.register_ims_package(ims, []) - - dis = ModflowGwfdis(gwf) - assert isinstance(dis, ModflowGwfdis) - - disu = ModflowGwfdisu(gwf) - assert isinstance(disu, ModflowGwfdisu) - - disv = ModflowGwfdisv(gwf) - assert isinstance(disv, ModflowGwfdisv) - - npf = ModflowGwfnpf(gwf) - assert isinstance(npf, ModflowGwfnpf) - - ic = ModflowGwfic(gwf) - assert isinstance(ic, ModflowGwfic) - - sto = ModflowGwfsto(gwf) - assert isinstance(sto, ModflowGwfsto) - - hfb = ModflowGwfhfb(gwf) - assert isinstance(hfb, ModflowGwfhfb) - - gnc = ModflowGwfgnc(gwf) - assert isinstance(gnc, ModflowGwfgnc) - - chd = ModflowGwfchd(gwf) - assert isinstance(chd, ModflowGwfchd) - - wel = ModflowGwfwel(gwf) - assert isinstance(wel, ModflowGwfwel) - - drn = ModflowGwfdrn(gwf) - assert isinstance(drn, ModflowGwfdrn) - - riv = ModflowGwfriv(gwf) - assert isinstance(riv, ModflowGwfriv) - - ghb = ModflowGwfghb(gwf) - assert isinstance(ghb, ModflowGwfghb) - - rch = ModflowGwfrch(gwf) - assert isinstance(rch, ModflowGwfrch) - - rcha = ModflowGwfrcha(gwf) - assert isinstance(rcha, ModflowGwfrcha) - - evt = ModflowGwfevt(gwf) - assert isinstance(evt, ModflowGwfevt) - - evta = ModflowGwfevta(gwf) - assert isinstance(evta, ModflowGwfevta) - - maw = ModflowGwfmaw(gwf) - assert isinstance(maw, ModflowGwfmaw) - - sfr = ModflowGwfsfr(gwf) - assert isinstance(sfr, ModflowGwfsfr) - - lak = ModflowGwflak(gwf) - assert isinstance(lak, ModflowGwflak) - - uzf = ModflowGwfuzf(gwf) - assert isinstance(uzf, ModflowGwfuzf) - - mvr = ModflowGwfmvr(gwf) - assert isinstance(mvr, ModflowGwfmvr) - - # Write files - sim.write_simulation() - - # Verify files were written - assert 
os.path.isfile(os.path.join(str(function_tmpdir), "mfsim.nam")) - exts_model = [ - "nam", - "dis", - "disu", - "disv", - "npf", - "ic", - "sto", - "hfb", - "gnc", - "chd", - "wel", - "drn", - "riv", - "ghb", - "rch", - "rcha", - "evt", - "evta", - "maw", - "sfr", - "lak", - "mvr", - ] - exts_sim = ["gwfgwf", "ims", "tdis"] - for ext in exts_model: - fname = os.path.join(str(function_tmpdir), f"model.{ext}") - assert os.path.isfile(fname), f"{fname} not found" - for ext in exts_sim: - fname = os.path.join(str(function_tmpdir), f"sim.{ext}") - assert os.path.isfile(fname), f"{fname} not found" - - -@pytest.mark.parametrize("use_paths", [True, False]) -def test_set_sim_path(function_tmpdir, use_paths): - sim_name = "testsim" - model_name = "testmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=exe_name, - sim_ws=function_tmpdir, - ) - - new_ws = function_tmpdir / "new_ws" - new_ws.mkdir() - sim.set_sim_path(new_ws if use_paths else str(new_ws)) - - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - sim.write_simulation() - - assert len([p for p in function_tmpdir.glob("*") if p.is_file()]) == 0 - assert len([p for p in new_ws.glob("*") if p.is_file()]) > 0 - - -@requires_exe("mf6") -@pytest.mark.parametrize("use_paths", [True, False]) -def test_create_and_run_model(function_tmpdir, use_paths): - # names - sim_name = "testsim" - model_name = "testmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - if use_paths: - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=Path(which(exe_name)), - sim_ws=function_tmpdir, - ) - else: - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=str(exe_name), - sim_ws=str(function_tmpdir), - ) - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - # create solution and add the model - ims_package = mfims.ModflowIms( - sim, - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.00001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.00001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - sim.register_ims_package(ims_package, [model_name]) - - # add packages to model - dis_package = mfgwfdis.ModflowGwfdis( - model, - length_units="FEET", - nlay=1, - nrow=1, - ncol=10, - delr=500.0, - delc=500.0, - top=100.0, - botm=50.0, - filename=f"{model_name}.dis", - ) - ic_package = mfgwfic.ModflowGwfic( - model, - strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0], - filename=f"{model_name}.ic", - ) - npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) - - sto_package = mfgwfsto.ModflowGwfsto( - model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 - ) - - wel_package = mfgwfwel.ModflowGwfwel( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=2, - stress_period_data=[((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)], - ) - wel_package.stress_period_data.add_transient_key(1) - 
wel_package.stress_period_data.set_data([((0, 0, 4), -200.0)], 1) - - drn_package = mfgwfdrn.ModflowGwfdrn( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=1, - stress_period_data=[((0, 0, 0), 80, 60.0)], - ) - - riv_package = mfgwfriv.ModflowGwfriv( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=1, - stress_period_data=[((0, 0, 9), 110, 90.0, 100.0)], - ) - oc_package = mfgwfoc.ModflowGwfoc( - model, - budget_filerecord=[f"{model_name}.cbc"], - head_filerecord=[f"{model_name}.hds"], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - oc_package.saverecord.add_transient_key(1) - oc_package.saverecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) - oc_package.printrecord.add_transient_key(1) - oc_package.printrecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) - - # write the simulation input files - sim.write_simulation() - - # run the simulation and look for output - success, buff = sim.run_simulation() - assert success - - -@requires_exe("mf6") -def test_get_set_data_record(function_tmpdir): - # names - sim_name = "testrecordsim" - model_name = "testrecordmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=exe_name, - sim_ws=str(function_tmpdir), - ) - tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - # create solution and add the model - ims_package = mfims.ModflowIms( - sim, - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.00001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.00001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - sim.register_ims_package(ims_package, [model_name]) - - # add packages to model - dis_package = mfgwfdis.ModflowGwfdis( - model, - length_units="FEET", - nlay=3, - nrow=10, - ncol=10, - delr=500.0, - delc=500.0, - top=100.0, - botm=[50.0, 10.0, -50.0], - filename=f"{model_name}.dis", - ) - ic_package = mfgwfic.ModflowGwfic( - model, - strt=[100.0, 90.0, 80.0], - filename=f"{model_name}.ic", - ) - npf_package = mfgwfnpf.ModflowGwfnpf( - model, save_flows=True, icelltype=1, k=50.0, k33=1.0 - ) - - sto_package = mfgwfsto.ModflowGwfsto( - model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 - ) - # wel packages - period_one = ModflowGwfwel.stress_period_data.empty( - model, - maxbound=3, - aux_vars=["var1", "var2", "var3"], - boundnames=True, - timeseries=True, - ) - period_one[0][0] = ((0, 9, 2), -50.0, -1, -2, -3, None) - period_one[0][1] = ((1, 4, 7), -100.0, 1, 2, 3, "well_1") - period_one[0][2] = ((1, 3, 2), -20.0, 4, 5, 6, "well_2") - period_two = ModflowGwfwel.stress_period_data.empty( - model, - maxbound=2, - aux_vars=["var1", "var2", "var3"], - boundnames=True, - timeseries=True, - ) - period_two[0][0] = ((2, 3, 2), -80.0, 1, 2, 3, "well_2") - period_two[0][1] = ((2, 4, 7), -10.0, 4, 5, 6, "well_1") - stress_period_data = {} - stress_period_data[0] = period_one[0] - stress_period_data[1] = period_two[0] - wel_package = ModflowGwfwel( - model, - print_input=True, - print_flows=True, - auxiliary=[("var1", "var2", "var3")], - maxbound=5, - stress_period_data=stress_period_data, - boundnames=True, - 
save_flows=True, - ) - # rch package - rch_period_list = [] - for row in range(0, 10): - for col in range(0, 10): - rch_amt = (1 + row / 10) * (1 + col / 10) - rch_period_list.append(((0, row, col), rch_amt, 0.5)) - rch_period = {} - rch_period[0] = rch_period_list - rch_package = ModflowGwfrch( - model, - fixed_cell=True, - auxiliary="MULTIPLIER", - auxmultname="MULTIPLIER", - print_input=True, - print_flows=True, - save_flows=True, - maxbound=54, - stress_period_data=rch_period, - ) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # test get_record, set_record for list data - wel = model.get_package("wel") - spd_record = wel.stress_period_data.get_record() - well_sp_1 = spd_record[0] - assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt" - assert well_sp_1["binary"] is False - assert well_sp_1["data"][0][0] == (0, 9, 2) - assert well_sp_1["data"][0][1] == -50.0 - # modify - del well_sp_1["filename"] - well_sp_1["data"][0][0] = (1, 9, 2) - well_sp_2 = spd_record[1] - del well_sp_2["filename"] - well_sp_2["data"][0][0] = (1, 1, 1) - # save - spd_record[0] = well_sp_1 - spd_record[1] = well_sp_2 - wel.stress_period_data.set_record(spd_record) - # verify changes - spd_record = wel.stress_period_data.get_record() - well_sp_1 = spd_record[0] - assert "filename" not in well_sp_1 - assert well_sp_1["data"][0][0] == (1, 9, 2) - assert well_sp_1["data"][0][1] == -50.0 - well_sp_2 = spd_record[1] - assert "filename" not in well_sp_2 - assert well_sp_2["data"][0][0] == (1, 1, 1) - spd = wel.stress_period_data.get_data() - assert spd[0][0][0] == (1, 9, 2) - # change well_sp_2 back to external - well_sp_2["filename"] = "wel_spd_data_2.txt" - spd_record[1] = well_sp_2 - wel.stress_period_data.set_record(spd_record) - # change well_sp_2 data - spd[1][0][0] = (1, 2, 2) - wel.stress_period_data.set_data(spd) - # verify changes - spd_record = wel.stress_period_data.get_record() - well_sp_2 = spd_record[1] - assert well_sp_2["filename"] == "wel_spd_data_2.txt" - assert well_sp_2["data"][0][0] == (1, 2, 2) - - # test get_data/set_data vs get_record/set_record - dis = model.get_package("dis") - botm = dis.botm.get_record() - assert len(botm) == 3 - layer_2 = botm[1] - layer_3 = botm[2] - # verify layer 2 - assert layer_2["filename"] == "testrecordmodel.dis_botm_layer2.txt" - assert layer_2["binary"] is False - assert layer_2["factor"] == 1.0 - assert layer_2["iprn"] is None - assert layer_2["data"][0][0] == 10.0 - # change and set layer 2 - layer_2["filename"] = "botm_layer2.txt" - layer_2["binary"] = True - layer_2["iprn"] = 3 - layer_2["factor"] = 2.0 - layer_2["data"] = layer_2["data"] * 0.5 - botm[1] = layer_2 - # change and set layer 3 - del layer_3["filename"] - layer_3["factor"] = 0.5 - layer_3["data"] = layer_3["data"] * 2.0 - botm[2] = layer_3 - dis.botm.set_record(botm) - - # get botm in two different ways, verifying changes made - botm_record = dis.botm.get_record() - layer_1 = botm_record[0] - assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" - assert layer_1["binary"] is False - assert layer_1["iprn"] is None - assert layer_1["data"][0][0] == 50.0 - layer_2 = botm_record[1] - assert layer_2["filename"] == "botm_layer2.txt" - assert layer_2["binary"] is True - assert layer_2["factor"] == 2.0 - assert layer_2["iprn"] == 3 - assert layer_2["data"][0][0] == 5.0 - layer_3 = botm_record[2] - assert "filename" not in layer_3 - assert layer_3["factor"] == 0.5 - assert layer_3["data"][0][0] == -100.0 - botm_data = 
dis.botm.get_data(apply_mult=True) - assert botm_data[0][0][0] == 50.0 - assert botm_data[1][0][0] == 10.0 - assert botm_data[2][0][0] == -50.0 - botm_data = dis.botm.get_data() - assert botm_data[0][0][0] == 50.0 - assert botm_data[1][0][0] == 5.0 - assert botm_data[2][0][0] == -100.0 - # modify and set botm data with set_data - botm_data[0][0][0] = 6.0 - botm_data[1][0][0] = -8.0 - botm_data[2][0][0] = -205.0 - dis.botm.set_data(botm_data) - # verify that data changed and metadata did not change - botm_record = dis.botm.get_record() - layer_1 = botm_record[0] - assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" - assert layer_1["binary"] is False - assert layer_1["iprn"] is None - assert layer_1["data"][0][0] == 6.0 - assert layer_1["data"][0][1] == 50.0 - layer_2 = botm_record[1] - assert layer_2["filename"] == "botm_layer2.txt" - assert layer_2["binary"] is True - assert layer_2["factor"] == 2.0 - assert layer_2["iprn"] == 3 - assert layer_2["data"][0][0] == -8.0 - assert layer_2["data"][0][1] == 5.0 - layer_3 = botm_record[2] - assert "filename" not in layer_3 - assert layer_3["factor"] == 0.5 - assert layer_3["data"][0][0] == -205.0 - botm_data = dis.botm.get_data() - assert botm_data[0][0][0] == 6.0 - assert botm_data[1][0][0] == -8.0 - assert botm_data[2][0][0] == -205.0 - - spd_record = rch_package.stress_period_data.get_record() - assert 0 in spd_record - assert isinstance(spd_record[0], dict) - assert "filename" in spd_record[0] - assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" - assert "binary" in spd_record[0] - assert spd_record[0]["binary"] is False - assert "data" in spd_record[0] - assert spd_record[0]["data"][0][0] == (0, 0, 0) - spd_record[0]["data"][0][0] = (0, 0, 8) - rch_package.stress_period_data.set_record(spd_record) - - spd_data = rch_package.stress_period_data.get_data() - assert spd_data[0][0][0] == (0, 0, 8) - spd_data[0][0][0] = (0, 0, 7) - rch_package.stress_period_data.set_data(spd_data) - - spd_record = rch_package.stress_period_data.get_record() - assert isinstance(spd_record[0], dict) - assert "filename" in spd_record[0] - assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" - assert "binary" in spd_record[0] - assert spd_record[0]["binary"] is False - assert "data" in spd_record[0] - assert spd_record[0]["data"][0][0] == (0, 0, 7) - - sim.write_simulation() - - -@requires_exe("mf6") -def test_output(function_tmpdir, example_data_path): - ex_name = "test001e_UZF_3lay" - sim_ws = example_data_path / "mf6" / ex_name - sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") - sim.set_sim_path(str(function_tmpdir)) - sim.write_simulation() - success, buff = sim.run_simulation() - assert success, f"simulation {sim.name} did not run" - - ml = sim.get_model("gwf_1") - - bud = ml.oc.output.budget() - budcsv = ml.oc.output.budgetcsv() - assert budcsv.file.closed - hds = ml.oc.output.head() - lst = ml.oc.output.list() - - idomain = np.ones(ml.modelgrid.shape, dtype=int) - zonbud = ml.oc.output.zonebudget(idomain) - - assert isinstance(bud, CellBudgetFile) - assert isinstance(budcsv, CsvFile) - assert isinstance(hds, HeadFile) - assert isinstance(zonbud, ZoneBudget6) - assert isinstance(lst, Mf6ListBudget) - - bud = ml.output.budget() - budcsv = ml.output.budgetcsv() - hds = ml.output.head() - zonbud = ml.output.zonebudget(idomain) - lst = ml.output.list() - - assert isinstance(bud, CellBudgetFile) - assert isinstance(budcsv, CsvFile) - assert isinstance(hds, HeadFile) - assert 
isinstance(zonbud, ZoneBudget6)
-    assert isinstance(lst, Mf6ListBudget)
-
-    uzf = ml.uzf
-    uzf_bud = uzf.output.budget()
-    uzf_budcsv = uzf.output.budgetcsv()
-    conv = uzf.output.package_convergence()
-    uzf_obs = uzf.output.obs()
-    uzf_zonbud = uzf.output.zonebudget(idomain)
-
-    assert isinstance(uzf_bud, CellBudgetFile)
-    assert isinstance(uzf_budcsv, CsvFile)
-    if conv is not None:
-        assert isinstance(conv, CsvFile)
-    assert isinstance(uzf_obs, Mf6Obs)
-    assert isinstance(uzf_zonbud, ZoneBudget6)
-    assert ml.dis.output.methods() is None
-
-
-@requires_exe("mf6")
-@pytest.mark.slow
-def test_output_add_observation(function_tmpdir, example_data_path):
-    model_name = "lakeex2a"
-    sim_ws = str(example_data_path / "mf6" / "test045_lake2tr")
-    sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6")
-    gwf = sim.get_model(model_name)
-
-    # remove sfr_obs and add a new sfr obs
-    sfr = gwf.sfr
-    obs_file = f"{model_name}.sfr.obs"
-    csv_file = f"{obs_file}.csv"
-    obs_dict = {
-        csv_file: [
-            ("l08_stage", "stage", (8,)),
-            ("l09_stage", "stage", (9,)),
-            ("l14_stage", "stage", (14,)),
-            ("l15_stage", "stage", (15,)),
-        ]
-    }
-    gwf.sfr.obs.initialize(
-        filename=obs_file, digits=10, print_input=True, continuous=obs_dict
-    )
-
-    sim.set_sim_path(str(function_tmpdir))
-    sim.write_simulation()
-
-    success, buff = sim.run_simulation()
-    assert success, f"simulation {sim.name} did not run"
-
-    # check that .output finds the newly added OBS package
-    sfr_obs = gwf.sfr.output.obs()
-
-    assert isinstance(sfr_obs, Mf6Obs), (
-        "remove and add observation test (Mf6Output) failed"
-    )
-
-
-@requires_exe("mf6")
-def test_sfr_connections(function_tmpdir, example_data_path):
-    """MODFLOW 6 only warns if any reaches are unconnected;
-    flopy fails to load the model if reach 1 is unconnected,
-    but is fine with other unconnected reaches.
-    """
-
-    data_path = example_data_path / "mf6" / "test666_sfrconnections"
-    sim_ws = function_tmpdir
-    for test in ["sfr0", "sfr1"]:
-        sim_name = "test_sfr"
-        model_name = "test_sfr"
-        tdis_name = f"{sim_name}.tdis"
-        sim = MFSimulation(
-            sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=sim_ws
-        )
-        tdis_rc = [(1.0, 1, 1.0)]
-        tdis = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc)
-        ims_package = ModflowIms(
-            sim,
-            pname="my_ims_file",
-            filename=f"{sim_name}.ims",
-            print_option="ALL",
-            complexity="SIMPLE",
-        )
-        model = ModflowGwf(
-            sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
-        )
-
-        dis = ModflowGwfdis(
-            model,
-            length_units="FEET",
-            nlay=1,
-            nrow=5,
-            ncol=5,
-            delr=5000.0,
-            delc=5000.0,
-            top=100.0,
-            botm=-100.0,
-            filename=f"{model_name}.dis",
-        )
-        ic_package = ModflowGwfic(model, filename=f"{model_name}.ic")
-        npf_package = ModflowGwfnpf(
-            model,
-            pname="npf",
-            save_flows=True,
-            alternative_cell_averaging="logarithmic",
-            icelltype=1,
-            k=50.0,
-        )
-
-        cnfile = f"mf6_{test}_connection.txt"
-        pkfile = f"mf6_{test}_package.txt"
-
-        with open(data_path / pkfile, "r") as f:
-            nreaches = len(f.readlines())
-        sfr = ModflowGwfsfr(
-            model,
-            packagedata={"filename": str(data_path / pkfile)},
-            connectiondata={"filename": str(data_path / cnfile)},
-            nreaches=nreaches,
-            pname="sfr",
-            unit_conversion=86400,
-        )
-        sim.set_all_data_external()
-        sim.write_simulation()
-        success, buff = sim.run_simulation()
-        assert success, f"simulation {sim.name} did not run"
-
-        # reload simulation
-        sim2 = MFSimulation.load(sim_ws=sim_ws)
-        sim2.set_all_data_external()
-        sim2.write_simulation()
-        success, buff = sim2.run_simulation()
-        assert success, f"simulation
{sim2.name} did not run after being reloaded" - - # test sfr recarray data - model2 = sim2.get_model() - sfr2 = model2.get_package("sfr") - sfr_pd = sfr2.packagedata - rec_data = [ - (0, 0, 0, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 1, 1.0, 0), - (1, 0, 1, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 2, 1.0, 0), - ] - rec_type = [ - ("ifno", int), - ("layer", int), - ("row", int), - ("column", int), - ("rlen", float), - ("rwid", float), - ("rgrd", float), - ("rtp", float), - ("rbth", float), - ("rhk", float), - ("man", float), - ("nconn", int), - ("ustrf", float), - ("nvd", int), - ] - pkg_data = np.rec.array(rec_data, rec_type) - sfr_pd.set_record({"data": pkg_data}) - data = sfr_pd.get_data() - assert data[0][1] == (0, 0, 0) - - -@requires_exe("mf6") -def test_array(function_tmpdir): - # get_data - # empty data in period block vs data repeating - # array - # aux values, test that they work the same as other arrays (is a value - # of zero always used even if aux is defined in a previous stress - # period?) - - sim_name = "test_array" - model_name = "test_array" - out_dir = function_tmpdir - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir) - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] - tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) - ims_package = ModflowIms( - sim, - pname="my_ims_file", - filename=f"{sim_name}.ims", - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.0001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.0001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam") - - dis = ModflowGwfdis( - model, - length_units="FEET", - nlay=4, - nrow=2, - ncol=2, - delr=5000.0, - delc=5000.0, - top=100.0, - botm=[50.0, 0.0, -50.0, -100.0], - filename=f"{model_name} 1.dis", - ) - ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic") - npf_package = ModflowGwfnpf( - model, - pname="npf_1", - save_flows=True, - alternative_cell_averaging="logarithmic", - icelltype=1, - k=50.0, - ) - - oc_package = ModflowGwfoc( - model, - budget_filerecord=[("test_array.cbc",)], - head_filerecord=[("test_array.hds",)], - saverecord={ - 0: [("HEAD", "ALL"), ("BUDGET", "ALL")], - 1: [], - }, - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]} - irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]} - rcha = ModflowGwfrcha( - model, - print_input=True, - print_flows=True, - auxiliary=[("var1", "var2")], - irch=irch, - recharge={1: 0.0001, 2: 0.00001}, - aux=aux, - ) - val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) - assert val_irch[0] == 4 - assert val_irch[1] == 5 - assert val_irch[2] == 6 - assert val_irch[3] == 6 - val_irch_2 = rcha.irch.get_data() - assert val_irch_2[0] is None - assert val_irch_2[1][1, 1] == 1 - assert val_irch_2[2][1, 1] == 3 - assert val_irch_2[3] is None - val_irch_2_3 = rcha.irch.get_data(3) - assert val_irch_2_3 is None - val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) - assert val_rch[0] == 0.0 - assert val_rch[1] == 0.0004 - assert val_rch[2] == 0.00004 - assert val_rch[3] == 0.00004 - val_rch_2 = rcha.recharge.get_data() - assert val_rch_2[0] is None - assert val_rch_2[1][0, 0] == 0.0001 - assert val_rch_2[2][0, 0] == 0.00001 - assert val_rch_2[3] is None - aux_data_0 = rcha.aux.get_data(0) - 
assert aux_data_0 is None - aux_data_1 = rcha.aux.get_data(1) - assert aux_data_1[0][0][0] == 50.0 - aux_data_2 = rcha.aux.get_data(2) - assert aux_data_2 is None - aux_data_3 = rcha.aux.get_data(3) - assert aux_data_3[0][0][0] == 200.0 - - welspdict = {1: [[(0, 0, 0), 0.25, 0.0]], 2: [[(0, 0, 0), 0.1, 0.0]]} - wel = ModflowGwfwel( - model, - print_input=True, - print_flows=True, - mover=True, - stress_period_data=welspdict, - save_flows=False, - auxiliary="CONCENTRATION", - pname="WEL-1", - ) - wel_array = wel.stress_period_data.array - #print(type(wel.stress_period_data)) - #print(type(wel.stress_period_data.array)) - assert wel_array[0] is None - assert wel_array[1][0][1] == 0.25 - assert wel_array[2][0][1] == 0.1 - assert wel_array[3][0][1] == 0.1 - - drnspdict = { - 0: [[(0, 0, 0), 60.0, 10.0]], - 2: [], - 3: [[(0, 0, 0), 55.0, 5.0]], - } - drn = ModflowGwfdrn( - model, - print_input=True, - print_flows=True, - stress_period_data=drnspdict, - save_flows=False, - pname="DRN-1", - ) - drn_array = drn.stress_period_data.array - assert drn_array[0][0][1] == 60.0 - assert drn_array[1][0][1] == 60.0 - assert drn_array[2] is None - assert drn_array[3][0][1] == 55.0 - drn_gd_0 = drn.stress_period_data.get_data(0) - assert drn_gd_0[0][1] == 60.0 - drn_gd_1 = drn.stress_period_data.get_data(1) - assert drn_gd_1 is None - drn_gd_2 = drn.stress_period_data.get_data(2) - assert len(drn_gd_2) == 0 - drn_gd_3 = drn.stress_period_data.get_data(3) - assert drn_gd_3[0][1] == 55.0 - - ghbspdict = { - 0: [[(0, 1, 1), 60.0, 10.0]], - } - ghb = ModflowGwfghb( - model, - print_input=True, - print_flows=True, - stress_period_data=ghbspdict, - save_flows=False, - pname="GHB-1", - ) - - lakpd = [(0, 70.0, 1), (1, 65.0, 1)] - lakecn = [ - (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), - (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0), - ] - lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")] - lak = ModflowGwflak( - model, - pname="lak", - print_input=True, - mover=True, - nlakes=2, - noutlets=0, - ntables=1, - packagedata=lakpd, - connectiondata=lakecn, - tables=lak_tables, - ) - - table_01 = [ - (30.0, 100000.0, 10000.0), - (40.0, 200500.0, 10100.0), - (50.0, 301200.0, 10130.0), - (60.0, 402000.0, 10180.0), - (70.0, 503000.0, 10200.0), - (80.0, 700000.0, 20000.0), - ] - lak_tab = ModflowUtllaktab( - model, - filename="lak01.tab", - nrow=6, - ncol=3, - table=table_01, - ) - - table_02 = [ - (40.0, 100000.0, 10000.0), - (50.0, 200500.0, 10100.0), - (60.0, 301200.0, 10130.0), - (70.0, 402000.0, 10180.0), - (80.0, 503000.0, 10200.0), - (90.0, 700000.0, 20000.0), - ] - lak_tab_2 = ModflowUtllaktab( - model, - filename="lak02.tab", - nrow=6, - ncol=3, - table=table_02, - ) - wel_name_1 = wel.name[0] - lak_name_2 = lak.name[0] - package_data = [(wel_name_1,), (lak_name_2,)] - period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)] - fname = f"{model.name}.input.mvr" - mvr = ModflowGwfmvr( - parent_model_or_package=model, - filename=fname, - print_input=True, - print_flows=True, - maxpackages=2, - maxmvr=1, - packages=package_data, - perioddata=period_data, - ) - - # test writing and loading model - sim.write_simulation() - sim.run_simulation() - - test_sim = MFSimulation.load( - sim_name, - "mf6", - "mf6", - out_dir, - write_headers=False, - ) - model = test_sim.get_model() - dis = model.get_package("dis") - rcha = model.get_package("rcha") - wel = model.get_package("wel") - drn = model.get_package("drn") - lak = model.get_package("lak") - lak_tab = model.get_package("laktab") - assert 
os.path.split(dis.filename)[1] == f"{model_name} 1.dis" - # do same tests as above - val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) - assert val_irch[0] == 4 - assert val_irch[1] == 5 - assert val_irch[2] == 6 - assert val_irch[3] == 6 - val_irch_2 = rcha.irch.get_data() - assert val_irch_2[0] is None - assert val_irch_2[1][1, 1] == 1 - assert val_irch_2[2][1, 1] == 3 - assert val_irch_2[3] is None - val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) - assert val_rch[0] == 0.0 - assert val_rch[1] == 0.0004 - assert val_rch[2] == 0.00004 - assert val_rch[3] == 0.00004 - val_rch_2 = rcha.recharge.get_data() - assert val_rch_2[0] is None - assert val_rch_2[1][0, 0] == 0.0001 - assert val_rch_2[2][0, 0] == 0.00001 - assert val_rch_2[3] is None - aux_data_0 = rcha.aux.get_data(0) - assert aux_data_0 is None - aux_data_1 = rcha.aux.get_data(1) - assert aux_data_1[0][0][0] == 50.0 - aux_data_2 = rcha.aux.get_data(2) - assert aux_data_2 is None - aux_data_3 = rcha.aux.get_data(3) - assert aux_data_3[0][0][0] == 200.0 - - wel_array = wel.stress_period_data.array - assert wel_array[0] is None - assert wel_array[1][0][1] == 0.25 - assert wel_array[2][0][1] == 0.1 - assert wel_array[3][0][1] == 0.1 - - drn_array = drn.stress_period_data.array - assert drn_array[0][0][1] == 60.0 - assert drn_array[1][0][1] == 60.0 - assert drn_array[2] is None - assert drn_array[3][0][1] == 55.0 - drn_gd_0 = drn.stress_period_data.get_data(0) - assert drn_gd_0[0][1] == 60.0 - drn_gd_1 = drn.stress_period_data.get_data(1) - assert drn_gd_1 is None - drn_gd_2 = drn.stress_period_data.get_data(2) - assert len(drn_gd_2) == 0 - drn_gd_3 = drn.stress_period_data.get_data(3) - assert drn_gd_3[0][1] == 55.0 - - lak_tab_array = lak.tables.get_data() - assert lak_tab_array[0][1] == "lak01.tab" - assert lak_tab_array[1][1] == "lak02.tab" - - assert len(lak_tab) == 2 - lak_tab_1 = lak_tab[0].table.get_data() - assert lak_tab_1[0][0] == 30.0 - assert lak_tab_1[5][2] == 20000.0 - lak_tab_2 = lak_tab[1].table.get_data() - assert lak_tab_2[0][0] == 40.0 - assert lak_tab_2[4][1] == 503000.0 - - -@requires_exe("mf6") -def test_grid_array(function_tmpdir): - # get_data - # empty data in period block vs data repeating - # array - # aux values, test that they work the same as other arrays (is a value - # of zero always used even if aux is defined in a previous stress - # period?) 
-
-    sim_name = "test_grid_array"
-    model_name = "test_grid_array"
-    out_dir = function_tmpdir
-    tdis_name = f"{sim_name}.tdis"
-    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
-    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
-    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
-    ims_package = ModflowIms(
-        sim,
-        pname="my_ims_file",
-        filename=f"{sim_name}.ims",
-        print_option="ALL",
-        complexity="SIMPLE",
-        outer_dvclose=0.0001,
-        outer_maximum=50,
-        under_relaxation="NONE",
-        inner_maximum=30,
-        inner_dvclose=0.0001,
-        linear_acceleration="CG",
-        preconditioner_levels=7,
-        preconditioner_drop_tolerance=0.01,
-        number_orthogonalizations=2,
-    )
-    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
-
-    dis = ModflowGwfdis(
-        model,
-        length_units="FEET",
-        nlay=4,
-        nrow=2,
-        ncol=2,
-        delr=5000.0,
-        delc=5000.0,
-        top=100.0,
-        botm=[50.0, 0.0, -50.0, -100.0],
-        filename=f"{model_name} 1.dis",
-    )
-    ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
-    npf_package = ModflowGwfnpf(
-        model,
-        pname="npf_1",
-        save_flows=True,
-        alternative_cell_averaging="logarithmic",
-        icelltype=1,
-        k=50.0,
-    )
-
-    oc_package = ModflowGwfoc(
-        model,
-        budget_filerecord=[("test_array.cbc",)],
-        head_filerecord=[("test_array.hds",)],
-        saverecord={
-            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
-            1: [],
-        },
-        printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
-    )
-
-    aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]}
-    irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]}
-    rcha = ModflowGwfrcha(
-        model,
-        print_input=True,
-        print_flows=True,
-        auxiliary=[("var1", "var2")],
-        irch=irch,
-        recharge={1: 0.0001, 2: 0.00001},
-        aux=aux,
-    )
-    # recharge is unspecified in period 0 and repeats from the last
-    # specified period thereafter
-    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
-    assert val_irch[0] == 4
-    assert val_irch[1] == 5
-    assert val_irch[2] == 6
-    assert val_irch[3] == 6
-    val_irch_2 = rcha.irch.get_data()
-    assert val_irch_2[0] is None
-    assert val_irch_2[1][1, 1] == 1
-    assert val_irch_2[2][1, 1] == 3
-    assert val_irch_2[3] is None
-    val_irch_2_3 = rcha.irch.get_data(3)
-    assert val_irch_2_3 is None
-    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
-    assert val_rch[0] == 0.0
-    assert val_rch[1] == 0.0004
-    assert val_rch[2] == 0.00004
-    assert val_rch[3] == 0.00004
-    val_rch_2 = rcha.recharge.get_data()
-    assert val_rch_2[0] is None
-    assert val_rch_2[1][0, 0] == 0.0001
-    assert val_rch_2[2][0, 0] == 0.00001
-    assert val_rch_2[3] is None
-    aux_data_0 = rcha.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-    nlay = dis.nlay.get_data()
-    nrow = dis.nrow.get_data()
-    ncol = dis.ncol.get_data()
-
-    DNODATA = 3.0e30  # MF6 DNODATA constant
-    # build grid-based well (welg) q and aux arrays; cells left at
-    # DNODATA carry no well for that stress period
-    welqspd = {}
-    welconcspd = {}
-    for n in range(4):
-        q = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        if n == 1:
-            q[0, 0, 0] = 0.25
-            welconc[0, 0, 0] = 0.0
-            welaux2[0, 0, 0] = 9.0
-        elif n == 2:
-            q[0, 0, 0] = 0.1
-            welconc[0, 0, 0] = 0.0
-            welaux2[0, 0, 0] = 9.0
-        welqspd[n] = q
-        welconcspd[n] = [welconc, welaux2]
-
-    # first create test package with multiple auxvars
-    wel = ModflowGwfwelg(
-        model,
-        print_input=True,
-        print_flows=True,
-        mover=True,
-        save_flows=False,
-        auxiliary=["var1", "var2"],
-        pname="WEL-1",
-        q=welqspd,
-        aux=welconcspd,
-    )
-
-    # aux array and get_data must agree for every period and aux variable
-    assert np.allclose(wel.aux.array[0][0], wel.aux.get_data(0)[0])
-    assert np.allclose(wel.aux.array[0][1], wel.aux.get_data(0)[1])
-    assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0])
-    assert np.allclose(wel.aux.array[1][1], wel.aux.get_data(1)[1])
-    assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0])
-    assert np.allclose(wel.aux.array[2][1], wel.aux.get_data(2)[1])
-    assert np.allclose(wel.aux.array[3][0], wel.aux.get_data(3)[0])
-    assert np.allclose(wel.aux.array[3][1], wel.aux.get_data(3)[1])
-
-    assert wel.q.get_data()[0] is None
-    assert wel.q.get_data(0) is None
-    assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1))
-    assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2))
-    assert len(wel.q.array) == 4
-    assert np.allclose(wel.q.array[1], wel.q.get_data(1))
-    assert np.allclose(wel.q.array[2], wel.q.get_data(2))
-    assert wel.q.get_data()[3] is None
-    assert wel.q.get_data(3) is None
-
-    # remove test wel package
-    wel.remove()
-
-    # create welg package
-    wel = ModflowGwfwelg(
-        model,
-        print_input=True,
-        print_flows=True,
-        mover=True,
-        save_flows=False,
-        auxiliary=["CONCENTRATION"],
-        pname="WEL-1",
-        q=welqspd,
-        aux=welconcspd,
-    )
-
-    assert not wel.has_stress_period_data
-    # DNODATA cells are inactive; mask them before summing
-    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
-    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
-    assert val_q[0] == 0.0
-    assert val_q[1] == 0.25
-    assert val_q[2] == 0.1
-    assert val_q[3] == 0.1
-    val_q_2 = wel.q.get_data()
-    assert val_q_2[0] is None
-    assert val_q_2[1][0, 0, 0] == 0.25
-    assert val_q_2[2][0, 0, 0] == 0.1
-    assert val_q_2[3] is None
-    aux_data_0 = wel.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = wel.aux.get_data(1)
-    assert aux_data_1[0][0][0][0] == 0.0
-    assert aux_data_1[0][0, 0, 0] == 0.0
-    aux_data_2 = wel.aux.get_data(2)
-    assert aux_data_2[0][0, 0, 0] == 0.0
-    aux_data_3 = wel.aux.get_data(3)
-    assert aux_data_3 is None
-
-    drnspdict = {
-        0: [[(0, 0, 0), 60.0, 10.0]],
-        2: [],
-        3: [[(0, 0, 0), 55.0, 5.0]],
-    }
-    drn = ModflowGwfdrn(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=drnspdict,
-        save_flows=False,
-        pname="DRN-1",
-    )
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 = drn.stress_period_data.get_data(3)
-    assert drn_gd_3[0][1] == 55.0
-
-    ghbspdict = {
-        0: [[(0, 1, 1), 60.0, 10.0]],
-    }
-    ghb = ModflowGwfghb(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=ghbspdict,
-        save_flows=False,
-        pname="GHB-1",
-    )
-
-    lakpd = [(0, 70.0, 1), (1, 65.0, 1)]
-    lakecn = [
-        (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-        (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-    ]
-    lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")]
-    lak = ModflowGwflak(
-        model,
-        pname="lak",
-        print_input=True,
-        mover=True,
-        nlakes=2,
-        noutlets=0,
-        ntables=1,
-        packagedata=lakpd,
-        connectiondata=lakecn,
-        tables=lak_tables,
-    )
-
-    table_01 = [
-        (30.0, 100000.0, 10000.0),
-        (40.0, 200500.0, 10100.0),
-        (50.0, 301200.0, 10130.0),
-        (60.0, 402000.0, 10180.0),
-        (70.0, 503000.0, 10200.0),
-        (80.0, 700000.0, 20000.0),
-    ]
-    lak_tab = ModflowUtllaktab(
-        model,
-        filename="lak01.tab",
-        nrow=6,
-        ncol=3,
-        table=table_01,
-    )
-
-    table_02 = [
-        (40.0, 100000.0, 10000.0),
-        (50.0, 200500.0, 10100.0),
-        (60.0, 301200.0, 10130.0),
-        (70.0, 402000.0, 10180.0),
-        (80.0, 503000.0, 10200.0),
-        (90.0, 700000.0, 20000.0),
-    ]
-    lak_tab_2 = ModflowUtllaktab(
-        model,
-        filename="lak02.tab",
-        nrow=6,
-        ncol=3,
-        table=table_02,
-    )
-    wel_name_1 = wel.name[0]
-    lak_name_2 = lak.name[0]
-    package_data = [(wel_name_1,), (lak_name_2,)]
-    period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)]
-    fname = f"{model.name}.input.mvr"
-    mvr = ModflowGwfmvr(
-        parent_model_or_package=model,
-        filename=fname,
-        print_input=True,
-        print_flows=True,
-        maxpackages=2,
-        maxmvr=1,
-        packages=package_data,
-        perioddata=period_data,
-    )
-
-    # test writing and loading model
-    sim.write_simulation()
-    sim.run_simulation()
-
-    test_sim = MFSimulation.load(
-        sim_name,
-        "mf6",
-        "mf6",
-        out_dir,
-        write_headers=False,
-    )
-    model = test_sim.get_model()
-    dis = model.get_package("dis")
-    rcha = model.get_package("rcha")
-    wel = model.get_package("wel")
-    drn = model.get_package("drn")
-    lak = model.get_package("lak")
-    lak_tab = model.get_package("laktab")
-    assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis"
-    # do same tests as above
-    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
-    assert val_irch[0] == 4
-    assert val_irch[1] == 5
-    assert val_irch[2] == 6
-    assert val_irch[3] == 6
-    val_irch_2 = rcha.irch.get_data()
-    assert val_irch_2[0] is None
-    assert val_irch_2[1][1, 1] == 1
-    assert val_irch_2[2][1, 1] == 3
-    assert val_irch_2[3] is None
-    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
-    assert val_rch[0] == 0.0
-    assert val_rch[1] == 0.0004
-    assert val_rch[2] == 0.00004
-    assert val_rch[3] == 0.00004
-    val_rch_2 = rcha.recharge.get_data()
-    assert val_rch_2[0] is None
-    assert val_rch_2[1][0, 0] == 0.0001
-    assert val_rch_2[2][0, 0] == 0.00001
-    assert val_rch_2[3] is None
-    aux_data_0 = rcha.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-    welg_q_per = wel.q.get_data()
-    # TODO: the welg round-trip checks below are not yet enabled
-    return
-    assert welg_q_per[0] is None
-    assert welg_q_per[1][0, 0, 0] == 0.25
-    assert welg_q_per[2][0, 0, 0] == 0.1
-    assert welg_q_per[3][0, 0, 0] == 0.1
-
-    welg_q_per1 = wel.q.get_data(1)
-    assert welg_q_per1[0, 0, 0] == 0.25
-    welg_aux_per1 = wel.aux.get_data(1)
-    assert welg_aux_per1[0][0, 0, 0] == 0.0
-
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 =
drn.stress_period_data.get_data(3) - assert drn_gd_3[0][1] == 55.0 - - lak_tab_array = lak.tables.get_data() - assert lak_tab_array[0][1] == "lak01.tab" - assert lak_tab_array[1][1] == "lak02.tab" - - assert len(lak_tab) == 2 - lak_tab_1 = lak_tab[0].table.get_data() - assert lak_tab_1[0][0] == 30.0 - assert lak_tab_1[5][2] == 20000.0 - lak_tab_2 = lak_tab[1].table.get_data() - assert lak_tab_2[0][0] == 40.0 - assert lak_tab_2[4][1] == 503000.0 - - -@requires_exe("mf6") -def test_multi_model(function_tmpdir): - # init paths - test_ex_name = "test_multi_model" - model_names = ["gwf_model_1", "gwf_model_2", "gwt_model_1", "gwt_model_2"] - - # temporal discretization - nper = 1 - perlen = [5.0] - nstp = [200] - tsmult = [1.0] - tdis_rc = [] - for i in range(nper): - tdis_rc.append((perlen[i], nstp[i], tsmult[i])) - - # build MODFLOW 6 files - sim = MFSimulation( - sim_name=test_ex_name, - version="mf6", - exe_name="mf6", - sim_ws=str(function_tmpdir), - ) - # create tdis package - tdis = ModflowTdis( - sim, time_units="DAYS", nper=nper, perioddata=tdis_rc, pname="sim.tdis" - ) - - # grid information - nlay, nrow, ncol = 1, 1, 50 - - # Create gwf1 model - welspd = {0: [[(0, 0, 0), 1.0, 1.0]]} - chdspd = None - gwf1 = get_gwf_model( - sim, - model_names[0], - model_names[0], - (nlay, nrow, ncol), - chdspd=chdspd, - welspd=welspd, - ) - - # Create gwf2 model - welspd = {0: [[(0, 0, 1), 0.5, 0.5]]} - chdspd = {0: [[(0, 0, ncol - 1), 0.0000000]]} - gwf2 = get_gwf_model( - sim, - model_names[1], - model_names[1], - (nlay, nrow, ncol), - chdspd=chdspd, - welspd=welspd, - ) - lakpd = [(0, -100.0, 1)] - lakecn = [(0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 0.1, 1.0, 10.0, 1.0)] - lak_2 = ModflowGwflak( - gwf2, - pname="lak2", - print_input=True, - mover=True, - nlakes=1, - noutlets=0, - ntables=0, - packagedata=lakpd, - connectiondata=lakecn, - ) - - # gwf-gwf - gwfgwf_data = [] - for col in range(0, ncol): - gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0]) - gwfgwf = ModflowGwfgwf( - sim, - exgtype="GWF6-GWF6", - nexg=len(gwfgwf_data), - exgmnamea=gwf1.name, - exgmnameb=gwf2.name, - exchangedata=gwfgwf_data, - auxiliary=["ANGLDEGX", "CDIST"], - filename="flow1_flow2.gwfgwf", - ) - # set up mvr package - wel_1 = gwf1.get_package("wel") - wel_1.mover.set_data(True) - wel_name_1 = wel_1.name[0] - lak_name_2 = lak_2.name[0] - package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)] - period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)] - fname = "gwfgwf.input.mvr" - gwfgwf.mvr.initialize( - filename=fname, - modelnames=True, - print_input=True, - print_flows=True, - maxpackages=2, - maxmvr=1, - packages=package_data, - perioddata=period_data, - ) - - gnc_data = [] - for col in range(0, ncol): - if col < ncol / 2.0: - gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col + 1), 0.25)) - else: - gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col - 1), 0.25)) - - # set up gnc package - fname = "gwfgwf.input.gnc" - gwfgwf.gnc.initialize( - filename=fname, - print_input=True, - print_flows=True, - numgnc=ncol, - numalphaj=1, - gncdata=gnc_data, - ) - - # Observe flow for exchange - gwfgwfobs = {} - obs_list = [] - for col in range(0, ncol): - obs_list.append([f"exchange_flow_{col}", "FLOW-JA-FACE", (col,)]) - gwfgwfobs["gwfgwf.output.obs.csv"] = obs_list - fname = "gwfgwf.input.obs" - gwfgwf.obs.initialize( - filename=fname, digits=25, print_input=True, continuous=gwfgwfobs - ) - - # Create gwt model - sourcerecarray = [("WEL-1", "AUX", 
"CONCENTRATION")] - gwt = get_gwt_model( - sim, - model_names[2], - model_names[2], - (nlay, nrow, ncol), - sourcerecarray=sourcerecarray, - ) - - # GWF GWT exchange - gwfgwt = ModflowGwfgwt( - sim, - exgtype="GWF6-GWT6", - exgmnamea=model_names[0], - exgmnameb=model_names[2], - filename="flow1_transport1.gwfgwt", - ) - - # solver settings - nouter, ninner = 100, 300 - hclose, rclose, relax = 1e-6, 1e-6, 1.0 - - # create iterative model solution and register the gwf model with it - imsgwf = ModflowIms( - sim, - print_option="SUMMARY", - outer_dvclose=hclose, - outer_maximum=nouter, - under_relaxation="NONE", - inner_maximum=ninner, - inner_dvclose=hclose, - rcloserecord=rclose, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=relax, - filename="flow.ims", - ) - - # create iterative model solution and register the gwt model with it - imsgwt = ModflowIms( - sim, - print_option="SUMMARY", - outer_dvclose=hclose, - outer_maximum=nouter, - under_relaxation="NONE", - inner_maximum=ninner, - inner_dvclose=hclose, - rcloserecord=rclose, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=relax, - filename="transport.ims", - ) - sim.register_ims_package(imsgwt, [gwt.name]) - - sim.write_simulation() - sim.run_simulation() - - # reload simulation - sim2 = MFSimulation.load(sim_ws=str(function_tmpdir)) - - # check ims registration - solution_recarray = sim2.name_file.solutiongroup - for solution_group_num in solution_recarray.get_active_key_list(): - rec_array = solution_recarray.get_data(solution_group_num[0]) - assert rec_array[0][1] == "flow.ims" - assert rec_array[0][2] == model_names[0] - assert rec_array[0][3] == model_names[1] - assert rec_array[1][1] == "transport.ims" - assert rec_array[1][2] == model_names[2] - assert gwf1.get_ims_package() is gwf2.get_ims_package() - assert gwf1.get_ims_package().filename == "flow.ims" - assert gwt.get_ims_package().filename == "transport.ims" - # test ssm fileinput - gwt2 = sim2.get_model("gwt_model_1") - ssm2 = gwt2.get_package("ssm") - fileinput = [ - ("RCH-1", "gwt_model_1.rch1.spc"), - ("RCH-2", "gwt_model_1.rch2.spc"), - ("RCH-3", "gwt_model_1.rch3.spc", "MIXED"), - ("RCH-4", "gwt_model_1.rch4.spc"), - ] - ssm2.fileinput = fileinput - fi_out = ssm2.fileinput.get_data() - assert fi_out[2][1] == "gwt_model_1.rch3.spc" - assert fi_out[1][2] is None - assert fi_out[2][2] == "MIXED" - - spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True) - spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False) - spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True) - spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True) - - # test writing and loading spca packages - sim2.write_simulation() - sim3 = MFSimulation.load(sim_ws=sim2.sim_path) - gwt3 = sim3.get_model("gwt_model_1") - spc1 = gwt3.get_package("gwt_model_1.rch1.spc") - assert isinstance(spc1, ModflowUtlspca) - assert spc1.print_input.get_data() is True - spc2 = gwt3.get_package("gwt_model_1.rch2.spc") - assert spc2.print_input.get_data() is not True - - # create a new gwt model - sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")] - gwt_2 = get_gwt_model( - sim, - model_names[3], - model_names[3], - (nlay, nrow, ncol), - sourcerecarray=sourcerecarray, - ) - # register gwt model with transport.ims - sim.register_ims_package(imsgwt, gwt_2.name) - # flow and transport exchange - gwfgwt = 
ModflowGwfgwt( - sim, - exgtype="GWF6-GWT6", - exgmnamea=model_names[1], - exgmnameb=model_names[3], - filename="flow2_transport2.gwfgwt", - ) - # save and run updated model - sim.write_simulation() - sim.run_simulation() - - with pytest.raises( - flopy.mf6.mfbase.FlopyException, - match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.', - ): - # test kwargs error checking - wel = ModflowGwfwel( - gwf2, - print_input=True, - print_flows=True, - stress_period_data=welspd, - save_flows=False, - auxiliary="CONCENTRATION", - pname="WEL-1", - param_does_not_exist=True, - ) - - -@requires_exe("mf6") -def test_namefile_creation(function_tmpdir): - test_ex_name = "test_namefile" - # build MODFLOW 6 files - sim = MFSimulation( - sim_name=test_ex_name, - version="mf6", - exe_name="mf6", - sim_ws=str(function_tmpdir), - ) - - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] - tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) - ims_package = ModflowIms( - sim, - pname="my_ims_file", - filename=f"{test_ex_name}.ims", - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.0001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.0001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - model = ModflowGwf( - sim, - modelname=test_ex_name, - model_nam_file=f"{test_ex_name}.nam", - ) - - # try to create simulation name file - ex_happened = False - try: - nam = ModflowNam(sim) - except flopy.mf6.mfbase.FlopyException: - ex_happened = True - assert ex_happened - - # try to create model name file - ex_happened = False - try: - nam = ModflowGwfnam(model) - except flopy.mf6.mfbase.FlopyException: - ex_happened = True - assert ex_happened - - -def test_remove_model(function_tmpdir, example_data_path): - # load a multi-model simulation - sim_ws = str(example_data_path / "mf6" / "test006_2models_mvr") - sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") - - # original simulation should contain models: - # - 'parent', with files named 'model1.ext' - # - 'child', with files named 'model2.ext' - assert len(sim.model_names) == 2 - assert "parent" in sim.model_names - assert "child" in sim.model_names - - # remove the child model - sim.remove_model("child") - - # simulation should now only contain the parent model - assert len(sim.model_names) == 1 - assert "parent" in sim.model_names - - # write simulation input files - sim.set_sim_path(function_tmpdir) - sim.write_simulation() - - # there should be no input files for the child model - files = list(function_tmpdir.glob("*")) - assert not any("model2" in f.name for f in files) - - # there should be no model or solver entry for the child model - # in the simulation namefile - lines = open(function_tmpdir / "mfsim.nam").readlines() - lines = [l.lower().strip() for l in lines] - assert not any("model2" in l for l in lines) - assert not any("child" in l for l in lines) - - # there should be no exchanges either - exg_index = 0 - for i, l in enumerate(lines): - if "begin exchanges" in l: - exg_index = i - elif exg_index > 0: - assert "end exchanges" in l - break - - -@requires_pkg("shapely") -@requires_exe("triangle") -def test_flopy_2283(function_tmpdir): - # create triangular grid - triangle_ws = function_tmpdir / "triangle" - triangle_ws.mkdir() - - active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)] - tri = Triangle(model_ws=triangle_ws, angle=30) - tri.add_polygon(active_area) - 
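    # a note on the two Triangle calls here, based on the
    # flopy.utils.triangle.Triangle API as used in this test: add_polygon
    # above defines the domain boundary, and add_region below marks a point
    # inside it; maximum_area=50**2 caps triangle area at 2500 square units,
    # i.e. cells on the order of 50 units across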
tri.add_region((1, 1), maximum_area=50**2) - - tri.build() - - # build vertex grid object - vgrid = flopy.discretization.VertexGrid( - vertices=tri.get_vertices(), - cell2d=tri.get_cell2d(), - xoff=199000, - yoff=215500, - crs=31370, - angrot=30, - ) - - # coord info is set (also correct when using vgrid.set_coord_info() - print(vgrid) - - # create MODFLOW 6 model - ws = function_tmpdir / "model" - ws.mkdir() - sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws) - tdis = flopy.mf6.ModflowTdis(sim) - ims = flopy.mf6.ModflowIms(sim) - - gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf") - disv = flopy.mf6.ModflowGwfdisv( - gwf, - xorigin=vgrid.xoffset, - yorigin=vgrid.yoffset, - angrot=vgrid.angrot, # no CRS info can be set in DISV - nlay=1, - top=0.0, - botm=-10.0, - ncpl=vgrid.ncpl, - nvert=vgrid.nvert, - cell2d=vgrid.cell2d, - vertices=tri.get_vertices(), # this is not stored in the Vertex grid object? - ) - - assert gwf.modelgrid.xoffset == disv.xorigin.get_data() - assert gwf.modelgrid.yoffset == disv.yorigin.get_data() - assert gwf.modelgrid.angrot == disv.angrot.get_data() diff --git a/autotest/tmp/test_mf6.py b/autotest/tmp/test_mf6.py deleted file mode 100644 index b723212803..0000000000 --- a/autotest/tmp/test_mf6.py +++ /dev/null @@ -1,2930 +0,0 @@ -import os -import platform -from pathlib import Path -from shutil import copytree, which - -import numpy as np -import pytest -from modflow_devtools.markers import requires_exe, requires_pkg -from modflow_devtools.misc import set_dir - -import flopy -from flopy.mf6 import ( - MFModel, - MFSimulation, - ModflowGwf, - ModflowGwfchd, - ModflowGwfdis, - ModflowGwfdisu, - ModflowGwfdisv, - ModflowGwfdrn, - ModflowGwfevt, - ModflowGwfevta, - ModflowGwfghb, - ModflowGwfgnc, - ModflowGwfgwf, - ModflowGwfgwt, - ModflowGwfhfb, - ModflowGwfic, - ModflowGwflak, - ModflowGwfmaw, - ModflowGwfmvr, - ModflowGwfnam, - ModflowGwfnpf, - ModflowGwfoc, - ModflowGwfrch, - ModflowGwfrcha, - ModflowGwfriv, - ModflowGwfsfr, - ModflowGwfsto, - ModflowGwfuzf, - ModflowGwfwel, - ModflowGwfwelg, - ModflowGwtadv, - ModflowGwtdis, - ModflowGwtic, - ModflowGwtmst, - ModflowGwtoc, - ModflowGwtssm, - ModflowIms, - ModflowNam, - ModflowTdis, - ModflowUtllaktab, - ModflowUtlspca, -) -from flopy.mf6.coordinates.modeldimensions import ( - DataDimensions, - ModelDimensions, - PackageDimensions, -) -from flopy.mf6.data.mffileaccess import MFFileAccessArray -from flopy.mf6.data.mfstructure import MFDataItemStructure, MFDataStructure -from flopy.mf6.mfsimbase import MFSimulationData -from flopy.mf6.modflow import ( - mfgwf, - mfgwfdis, - mfgwfdrn, - mfgwfic, - mfgwfnpf, - mfgwfoc, - mfgwfriv, - mfgwfsto, - mfgwfwel, - mfims, - mftdis, -) -from flopy.utils import CellBudgetFile, HeadFile, Mf6ListBudget, Mf6Obs, ZoneBudget6 -from flopy.utils.observationfile import CsvFile -from flopy.utils.triangle import Triangle -from flopy.utils.voronoi import VoronoiGrid - -pytestmark = pytest.mark.mf6 - - -def write_head( - fbin, - data, - kstp=1, - kper=1, - pertim=1.0, - totim=1.0, - text=" HEAD", - ilay=1, -): - dt = np.dtype( - [ - ("kstp", "i4"), - ("kper", "i4"), - ("pertim", "f8"), - ("totim", "f8"), - ("text", "S16"), - ("ncol", "i4"), - ("nrow", "i4"), - ("ilay", "i4"), - ] - ) - nrow = data.shape[0] - ncol = data.shape[1] - h = np.array((kstp, kper, pertim, totim, text, ncol, nrow, ilay), dtype=dt) - h.tofile(fbin) - data.tofile(fbin) - - -def get_gwf_model(sim, gwfname, gwfpath, modelshape, chdspd=None, welspd=None): - nlay, nrow, ncol = modelshape - delr = 1.0 - delc = 
1.0 - top = 1.0 - botm = [0.0] - strt = 1.0 - hk = 1.0 - laytyp = 0 - - gwf = ModflowGwf( - sim, - modelname=gwfname, - save_flows=True, - ) - gwf.set_model_relative_path(gwfpath) - - dis = ModflowGwfdis( - gwf, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - # initial conditions - ic = ModflowGwfic(gwf, strt=strt) - - # node property flow - npf = ModflowGwfnpf( - gwf, - icelltype=laytyp, - k=hk, - save_specific_discharge=True, - ) - - # chd files - if chdspd is not None: - chd = ModflowGwfchd( - gwf, - stress_period_data=chdspd, - save_flows=False, - pname="CHD-1", - ) - - # wel files - if welspd is not None: - wel = ModflowGwfwel( - gwf, - print_input=True, - print_flows=True, - stress_period_data=welspd, - save_flows=False, - auxiliary="CONCENTRATION", - pname="WEL-1", - ) - - # output control - oc = ModflowGwfoc( - gwf, - budget_filerecord=f"{gwfname}.cbc", - head_filerecord=f"{gwfname}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], - printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], - ) - return gwf - - -def get_gwt_model(sim, gwtname, gwtpath, modelshape, sourcerecarray=None): - nlay, nrow, ncol = modelshape - delr = 1.0 - delc = 1.0 - top = 1.0 - botm = [0.0] - strt = 1.0 - hk = 1.0 - laytyp = 0 - - gwt = MFModel( - sim, - model_type="gwt6", - modelname=gwtname, - model_rel_path=gwtpath, - ) - gwt.name_file.save_flows = True - - dis = ModflowGwtdis( - gwt, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - # initial conditions - ic = ModflowGwtic(gwt, strt=0.0) - - # advection - adv = ModflowGwtadv(gwt, scheme="upstream") - - # mass storage and transfer - mst = ModflowGwtmst(gwt, porosity=0.1) - - # sources - ssm = ModflowGwtssm(gwt, sources=sourcerecarray) - - # output control - oc = ModflowGwtoc( - gwt, - budget_filerecord=f"{gwtname}.cbc", - concentration_filerecord=f"{gwtname}.ucn", - concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], - printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")], - ) - return gwt - - -def to_win_sep(s): - return s.replace("/", "\\") - - -def to_posix_sep(s): - return s.replace("\\", "/") - - -def to_os_sep(s): - return s.replace("\\", os.sep).replace("/", os.sep) - - -@requires_exe("mf6") -def test_load_and_run_sim_when_namefile_uses_filenames( - function_tmpdir, example_data_path -): - # copy model input files to temp workspace - model_name = "mf6-freyberg" - workspace = function_tmpdir / model_name - copytree(example_data_path / model_name, workspace) - - # load, check and run simulation - sim = MFSimulation.load(sim_ws=workspace) - sim.check() - success, _ = sim.run_simulation(report=True) - assert success - - -@requires_exe("mf6") -def test_load_and_run_sim_when_namefile_uses_abs_paths( - function_tmpdir, example_data_path -): - # copy model input files to temp workspace - model_name = "freyberg" - workspace = function_tmpdir / "ws" - copytree(example_data_path / f"mf6-{model_name}", workspace) - - # sub abs paths into namefile - with set_dir(workspace): - nam_path = workspace / "mfsim.nam" - lines = open(nam_path).readlines() - with open(nam_path, "w") as f: - for l in lines: - pattern = f"{model_name}." 
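                # every file entry in this namefile starts with "freyberg.",
                # so matching that prefix is enough to rewrite each entry as
                # an absolute path below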
-                if pattern in l:
-                    l = l.replace(pattern, str(workspace.absolute()) + os.sep + pattern)
-                f.write(l)
-
-    # load, check and run simulation
-    sim = MFSimulation.load(sim_ws=workspace)
-    sim.check()
-    success, _ = sim.run_simulation(report=True)
-    assert success
-
-
-@requires_exe("mf6")
-@pytest.mark.parametrize("sep", ["win", "posix"])
-def test_load_sim_when_namefile_uses_rel_paths(function_tmpdir, example_data_path, sep):
-    # copy model input files to temp workspace
-    model_name = "freyberg"
-    workspace = function_tmpdir / "ws"
-    copytree(example_data_path / f"mf6-{model_name}", workspace)
-
-    # sub rel paths into namefile
-    with set_dir(workspace):
-        nam_path = workspace / "mfsim.nam"
-        lines = open(nam_path).readlines()
-        with open(nam_path, "w") as f:
-            for l in lines:
-                pattern = f"{model_name}."
-                if pattern in l:
-                    if sep == "win":
-                        l = to_win_sep(
-                            l.replace(
-                                pattern, "../" + workspace.name + "/" + model_name + "."
-                            )
-                        )
-                    else:
-                        l = to_posix_sep(
-                            l.replace(
-                                pattern, "../" + workspace.name + "/" + model_name + "."
-                            )
-                        )
-                f.write(l)
-
-    # load and check simulation
-    sim = MFSimulation.load(sim_ws=workspace)
-    sim.check()
-
-    # don't run simulation with Windows sep on Linux or Mac
-    if sep == "win" and platform.system() != "Windows":
-        return
-
-    # run simulation
-    success, _ = sim.run_simulation(report=True)
-    assert success
-
-
-@pytest.mark.skip(reason="currently flopy uses OS-specific path separators")
-@pytest.mark.parametrize("sep", ["win", "posix"])
-def test_write_simulation_always_writes_posix_path_separators(
-    function_tmpdir, example_data_path, sep
-):
-    # copy model input files to temp workspace
-    model_name = "freyberg"
-    workspace = function_tmpdir / "ws"
-    copytree(example_data_path / f"mf6-{model_name}", workspace)
-
-    # use OS-specific path separators
-    with set_dir(workspace):
-        nam_path = workspace / "mfsim.nam"
-        lines = open(nam_path).readlines()
-        with open(nam_path, "w") as f:
-            for l in lines:
-                pattern = f"{model_name}."
-                if pattern in l:
-                    if sep == "win":
-                        l = to_win_sep(
-                            l.replace(
-                                pattern, "../" + workspace.name + "/" + model_name + "."
-                            )
-                        )
-                    else:
-                        l = to_posix_sep(
-                            l.replace(
-                                pattern, "../" + workspace.name + "/" + model_name + "."
-                            )
-                        )
-                f.write(l)
-
-    # load and write simulation
-    sim = MFSimulation.load(sim_ws=workspace)
-    sim.write_simulation()
-
-    # make sure posix separators were written
-    lines = open(workspace / "mfsim.nam").readlines()
-    assert all("\\" not in l for l in lines)
-
-
-@requires_exe("mf6")
-@pytest.mark.parametrize("filename", ["name", "rel", "rel_win"])
-def test_basic_gwf(function_tmpdir, filename):
-    ws = function_tmpdir
-    name = "basic_gwf_prep"
-    sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name="mf6")
-    pd = [(1.0, 1, 1.0), (1.0, 1, 1.0)]
-
-    innerdir = Path(function_tmpdir / "inner")
-    innerdir.mkdir()
-
-    # mfpackage filename can be path or string..
-    # if string, it can either be a file name or
-    # path relative to the simulation workspace.
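    # a minimal sketch of the three accepted forms (hypothetical file names;
    # behavior as asserted in the parametrized cases below):
    #   ModflowTdis(sim, filename="example.tdis")         # plain name, created in sim_ws
    #   ModflowTdis(sim, filename="inner/example.tdis")   # path relative to sim_ws
    #   ModflowTdis(sim, filename="inner\\example.tdis")  # backslashes normalized to "/"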
- tdis_name = f"{name}.tdis" - tdis_path = innerdir / tdis_name - tdis_path.touch() - tdis_relpath = tdis_path.relative_to(ws).as_posix() - tdis_relpath_win = str(tdis_relpath).replace("/", "\\") - - if filename == "name": - # file named with no path will be created in simulation workspace - tdis = flopy.mf6.ModflowTdis( - sim, nper=len(pd), perioddata=pd, filename=tdis_name - ) - assert tdis.filename == tdis_name - elif filename == "rel": - # filename may be a relative pathlib.Path - tdis = flopy.mf6.ModflowTdis( - sim, nper=len(pd), perioddata=pd, filename=tdis_relpath - ) - assert tdis.filename == str(tdis_relpath) - - # relative paths may also be provided as strings - tdis = flopy.mf6.ModflowTdis( - sim, nper=len(pd), perioddata=pd, filename=str(tdis_relpath) - ) - assert tdis.filename == str(tdis_relpath) - elif filename == "rel_win": - # windows path backslash separator should be converted to forward slash - tdis = flopy.mf6.ModflowTdis( - sim, nper=len(pd), perioddata=pd, filename=tdis_relpath_win - ) - assert tdis.filename == str(tdis_relpath) - - # create other packages - ims = flopy.mf6.ModflowIms(sim) - gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) - dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10) - ic = flopy.mf6.ModflowGwfic(gwf) - npf = flopy.mf6.ModflowGwfnpf( - gwf, save_specific_discharge=True, save_saturation=True - ) - spd = { - 0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]], - 1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]], - } - chd = flopy.mf6.ModflowGwfchd( - gwf, pname="CHD-1", stress_period_data=spd, auxiliary=["concentration"] - ) - budget_file = f"{name}.bud" - head_file = f"{name}.hds" - oc = flopy.mf6.ModflowGwfoc( - gwf, - budget_filerecord=budget_file, - head_filerecord=head_file, - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - # write the simulation - sim.write_simulation() - - # check for input files - assert (ws / innerdir / tdis_name).is_file() - assert (ws / f"{name}.ims").is_file() - assert (ws / f"{name}.dis").is_file() - assert (ws / f"{name}.ic").is_file() - assert (ws / f"{name}.npf").is_file() - assert (ws / f"{name}.chd").is_file() - assert (ws / f"{name}.oc").is_file() - - # run the simulation - sim.run_simulation() - - # check for output files - assert (ws / budget_file).is_file() - assert (ws / head_file).is_file() - - -def test_subdir(function_tmpdir): - sim = MFSimulation(sim_ws=function_tmpdir) - assert sim.sim_path == function_tmpdir - - tdis = ModflowTdis(sim) - gwf = ModflowGwf(sim, model_rel_path="level2") - ims = ModflowIms(sim) - sim.register_ims_package(ims, []) - dis = ModflowGwfdis(gwf) - sim.set_all_data_external(external_data_folder="dat") - sim.write_simulation() - - sim_r = MFSimulation.load( - "mfsim.nam", - sim_ws=sim.simulation_data.mfpath.get_sim_path(), - ) - gwf_r = sim_r.get_model() - assert gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry(), ( - "Something wrong with model external paths" - ) - - sim_r.set_all_data_internal() - sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2")) - sim_r.write_simulation() - - sim_r2 = MFSimulation.load( - "mfsim.nam", - sim_ws=sim_r.simulation_data.mfpath.get_sim_path(), - ) - gwf_r2 = sim_r.get_model() - assert gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry(), ( - "Something wrong with model external paths" - ) - - -@requires_exe("mf6") -@pytest.mark.parametrize("layered", [True, False]) -def test_binary_write(function_tmpdir, layered): - nlay, nrow, ncol = 2, 1, 10 - shape2d = (nrow, 
ncol) - - # data for layers - botm = [4.0, 0.0] - strt = [5.0, 10.0] - - # create binary data structured - if layered: - idomain_data = [] - botm_data = [] - strt_data = [] - for k in range(nlay): - idomain_data.append( - { - "factor": 1.0, - "filename": f"idomain_l{k + 1}.bin", - "data": 1, - "binary": True, - "iprn": 1, - } - ) - botm_data.append( - { - "filename": f"botm_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(shape2d, botm[k], dtype=float), - } - ) - strt_data.append( - { - "filename": f"strt_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(shape2d, strt[k], dtype=float), - } - ) - else: - idomain_data = { - "filename": "idomain.bin", - "binary": True, - "iprn": 1, - "data": 1, - } - botm_data = { - "filename": "botm.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(shape2d, botm[0], dtype=float), - np.full(shape2d, botm[1], dtype=float), - ] - ), - } - strt_data = { - "filename": "strt.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(shape2d, strt[0], dtype=float), - np.full(shape2d, strt[1], dtype=float), - ] - ), - } - - # binary data that does not vary by layers - top_data = { - "filename": "top.bin", - "binary": True, - "iprn": 1, - "data": 10.0, - } - rch_data = { - 0: { - "filename": "recharge.bin", - "binary": True, - "iprn": 1, - "data": 0.000001, - }, - } - chd_data = [ - (1, 0, 0, 10.0, 1.0, 100.0), - (1, 0, ncol - 1, 5.0, 0.0, 100.0), - ] - chd_data = { - 0: { - "filename": "chd.bin", - "binary": True, - "iprn": 1, - "data": chd_data, - }, - } - - sim = MFSimulation(sim_ws=str(function_tmpdir)) - ModflowTdis(sim) - ModflowIms(sim, complexity="simple") - gwf = ModflowGwf(sim, print_input=True) - ModflowGwfdis( - gwf, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=1.0, - delc=1.0, - top=top_data, - botm=botm_data, - idomain=idomain_data, - ) - ModflowGwfnpf( - gwf, - icelltype=1, - ) - ModflowGwfic( - gwf, - strt=strt_data, - ) - ModflowGwfchd( - gwf, - auxiliary=["conc", "something"], - stress_period_data=chd_data, - ) - ModflowGwfrcha(gwf, recharge=rch_data) - - sim.write_simulation() - success, buff = sim.run_simulation() - assert success - - -@requires_exe("mf6") -@requires_pkg("shapely", "scipy") -@pytest.mark.parametrize("layered", [True, False]) -def test_vor_binary_write(function_tmpdir, layered): - # build voronoi grid - boundary = [(0.0, 0.0), (0.0, 1.0), (10.0, 1.0), (10.0, 0.0)] - triangle_ws = function_tmpdir / "triangle" - triangle_ws.mkdir(parents=True, exist_ok=True) - - tri = Triangle( - angle=30, - maximum_area=1.0, - model_ws=triangle_ws, - ) - tri.add_polygon(boundary) - tri.build(verbose=False) - vor = VoronoiGrid(tri) - - # problem dimensions - nlay = 2 - - # data for layers - botm = [4.0, 0.0] - strt = [5.0, 10.0] - - # build binary data - if layered: - idomain_data = [] - botm_data = [] - strt_data = [] - for k in range(nlay): - idomain_data.append( - { - "factor": 1.0, - "filename": f"idomain_l{k + 1}.bin", - "data": 1, - "binary": True, - "iprn": 1, - } - ) - botm_data.append( - { - "filename": f"botm_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, botm[k], dtype=float), - } - ) - strt_data.append( - { - "filename": f"strt_l{k + 1}.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, strt[k], dtype=float), - } - ) - else: - idomain_data = { - "filename": "idomain.bin", - "binary": True, - "iprn": 1, - "data": 1, - } - botm_data = { - "filename": "botm.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(vor.ncpl, 
botm[0], dtype=float), - np.full(vor.ncpl, botm[1], dtype=float), - ] - ), - } - strt_data = { - "filename": "strt.bin", - "binary": True, - "iprn": 1, - "data": np.array( - [ - np.full(vor.ncpl, strt[0], dtype=float), - np.full(vor.ncpl, strt[1], dtype=float), - ] - ), - } - - # binary data that does not vary by layers - top_data = { - "filename": "top.bin", - "binary": True, - "iprn": 1, - "data": 10.0, - } - rch_data = { - 0: { - "filename": "recharge.bin", - "binary": True, - "iprn": 1, - "data": np.full(vor.ncpl, 0.000001, dtype=float), - }, - } - chd_data = [ - (1, 0, 10.0, 1.0, 100.0), - (1, 1, 10.0, 1.0, 100.0), - (1, 2, 5.0, 0.0, 100.0), - (1, 3, 5.0, 0.0, 100.0), - ] - chd_data = { - 0: { - "filename": "chd.bin", - "binary": True, - "data": chd_data, - }, - } - - # build model - sim = MFSimulation(sim_ws=str(function_tmpdir)) - ModflowTdis(sim) - ModflowIms(sim, complexity="simple") - gwf = ModflowGwf(sim, print_input=True) - flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=vor.ncpl, - nvert=vor.nverts, - vertices=vor.get_disv_gridprops()["vertices"], - cell2d=vor.get_disv_gridprops()["cell2d"], - top=top_data, - botm=botm_data, - idomain=idomain_data, - xorigin=0.0, - yorigin=0.0, - ) - ModflowGwfnpf( - gwf, - icelltype=1, - ) - ModflowGwfic( - gwf, - strt=strt_data, - ) - ModflowGwfrcha(gwf, recharge=rch_data) - ModflowGwfchd( - gwf, - auxiliary=["conc", "something"], - stress_period_data=chd_data, - ) - sim.write_simulation() - success, buff = sim.run_simulation() - assert success - - -def test_binary_read(function_tmpdir): - test_ex_name = "binary_read" - nlay = 3 - nrow = 10 - ncol = 10 - - modelgrid = flopy.discretization.StructuredGrid(nlay=nlay, nrow=nrow, ncol=ncol) - - arr = np.arange(nlay * nrow * ncol).astype(np.float64) - data_shape = (nlay, nrow, ncol) - data_size = nlay * nrow * ncol - arr.shape = data_shape - - sim_data = MFSimulationData("integration", None) - dstruct = MFDataItemStructure() - dstruct.is_cellid = False - dstruct.name = "fake" - dstruct.data_items = [ - None, - ] - mfstruct = MFDataStructure(dstruct, False, "ic", None) - mfstruct.data_item_structures = [ - dstruct, - ] - mfstruct.path = [ - "fake", - ] - - md = ModelDimensions("test", None) - pd = PackageDimensions([md], None, "integration") - dd = DataDimensions(pd, mfstruct) - - binfile = function_tmpdir / "structured_layered.hds" - with open(binfile, "wb") as foo: - for ix, a in enumerate(arr): - write_head(foo, a, ilay=ix) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - # test path as both Path and str - for bf in [binfile, str(binfile)]: - arr2 = fa.read_binary_data_from_file( - bf, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), ( - "Binary read for layered structured failed with " - + ("Path" if isinstance(binfile, Path) else "str") - ) - - binfile = function_tmpdir / "structured_flat.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(np.ravel(arr), axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for flat Structured failed" - - ncpl = nrow * ncol - data_shape = (nlay, ncpl) - arr.shape = data_shape - modelgrid = flopy.discretization.VertexGrid(nlay=nlay, ncpl=ncpl) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - binfile = function_tmpdir / "vertex_layered.hds" - with open(binfile, "wb") as foo: - tarr = arr.reshape((nlay, 1, ncpl)) - for ix, a in 
enumerate(tarr): - write_head(foo, a, ilay=ix) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for layered Vertex failed" - - binfile = function_tmpdir / "vertex_flat.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(np.ravel(arr), axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for flat Vertex failed" - - nlay = 3 - ncpl = [50, 100, 150] - data_shape = (np.sum(ncpl),) - arr.shape = data_shape - modelgrid = flopy.discretization.UnstructuredGrid(ncpl=ncpl) - - fa = MFFileAccessArray(mfstruct, dd, sim_data, None, None) - - binfile = function_tmpdir / "unstructured.hds" - with open(binfile, "wb") as foo: - a = np.expand_dims(arr, axis=0) - write_head(foo, a, ilay=1) - - arr2 = fa.read_binary_data_from_file( - binfile, data_shape, data_size, np.float64, modelgrid - )[0] - - assert np.allclose(arr, arr2), "Binary read for Unstructured failed" - - -@requires_exe("mf6") -def test_props_and_write(function_tmpdir): - # workspace as str - sim = MFSimulation(sim_ws=str(function_tmpdir)) - assert isinstance(sim, MFSimulation) - assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path - - # workspace as Path - sim = MFSimulation(sim_ws=function_tmpdir) - assert isinstance(sim, MFSimulation) - assert sim.simulation_data.mfpath.get_sim_path() == function_tmpdir == sim.sim_path - - tdis = ModflowTdis(sim) - assert isinstance(tdis, ModflowTdis) - - gwfgwf = ModflowGwfgwf(sim, exgtype="gwf6-gwf6", exgmnamea="gwf1", exgmnameb="gwf2") - assert isinstance(gwfgwf, ModflowGwfgwf) - - gwf = ModflowGwf(sim) - assert isinstance(gwf, ModflowGwf) - - ims = ModflowIms(sim) - assert isinstance(ims, ModflowIms) - sim.register_ims_package(ims, []) - - dis = ModflowGwfdis(gwf) - assert isinstance(dis, ModflowGwfdis) - - disu = ModflowGwfdisu(gwf) - assert isinstance(disu, ModflowGwfdisu) - - disv = ModflowGwfdisv(gwf) - assert isinstance(disv, ModflowGwfdisv) - - npf = ModflowGwfnpf(gwf) - assert isinstance(npf, ModflowGwfnpf) - - ic = ModflowGwfic(gwf) - assert isinstance(ic, ModflowGwfic) - - sto = ModflowGwfsto(gwf) - assert isinstance(sto, ModflowGwfsto) - - hfb = ModflowGwfhfb(gwf) - assert isinstance(hfb, ModflowGwfhfb) - - gnc = ModflowGwfgnc(gwf) - assert isinstance(gnc, ModflowGwfgnc) - - chd = ModflowGwfchd(gwf) - assert isinstance(chd, ModflowGwfchd) - - wel = ModflowGwfwel(gwf) - assert isinstance(wel, ModflowGwfwel) - - drn = ModflowGwfdrn(gwf) - assert isinstance(drn, ModflowGwfdrn) - - riv = ModflowGwfriv(gwf) - assert isinstance(riv, ModflowGwfriv) - - ghb = ModflowGwfghb(gwf) - assert isinstance(ghb, ModflowGwfghb) - - rch = ModflowGwfrch(gwf) - assert isinstance(rch, ModflowGwfrch) - - rcha = ModflowGwfrcha(gwf) - assert isinstance(rcha, ModflowGwfrcha) - - evt = ModflowGwfevt(gwf) - assert isinstance(evt, ModflowGwfevt) - - evta = ModflowGwfevta(gwf) - assert isinstance(evta, ModflowGwfevta) - - maw = ModflowGwfmaw(gwf) - assert isinstance(maw, ModflowGwfmaw) - - sfr = ModflowGwfsfr(gwf) - assert isinstance(sfr, ModflowGwfsfr) - - lak = ModflowGwflak(gwf) - assert isinstance(lak, ModflowGwflak) - - uzf = ModflowGwfuzf(gwf) - assert isinstance(uzf, ModflowGwfuzf) - - mvr = ModflowGwfmvr(gwf) - assert isinstance(mvr, ModflowGwfmvr) - - # Write files - sim.write_simulation() - - # Verify files were written - assert 
os.path.isfile(os.path.join(str(function_tmpdir), "mfsim.nam")) - exts_model = [ - "nam", - "dis", - "disu", - "disv", - "npf", - "ic", - "sto", - "hfb", - "gnc", - "chd", - "wel", - "drn", - "riv", - "ghb", - "rch", - "rcha", - "evt", - "evta", - "maw", - "sfr", - "lak", - "mvr", - ] - exts_sim = ["gwfgwf", "ims", "tdis"] - for ext in exts_model: - fname = os.path.join(str(function_tmpdir), f"model.{ext}") - assert os.path.isfile(fname), f"{fname} not found" - for ext in exts_sim: - fname = os.path.join(str(function_tmpdir), f"sim.{ext}") - assert os.path.isfile(fname), f"{fname} not found" - - -@pytest.mark.parametrize("use_paths", [True, False]) -def test_set_sim_path(function_tmpdir, use_paths): - sim_name = "testsim" - model_name = "testmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=exe_name, - sim_ws=function_tmpdir, - ) - - new_ws = function_tmpdir / "new_ws" - new_ws.mkdir() - sim.set_sim_path(new_ws if use_paths else str(new_ws)) - - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - sim.write_simulation() - - assert len([p for p in function_tmpdir.glob("*") if p.is_file()]) == 0 - assert len([p for p in new_ws.glob("*") if p.is_file()]) > 0 - - -@requires_exe("mf6") -@pytest.mark.parametrize("use_paths", [True, False]) -def test_create_and_run_model(function_tmpdir, use_paths): - # names - sim_name = "testsim" - model_name = "testmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - if use_paths: - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=Path(which(exe_name)), - sim_ws=function_tmpdir, - ) - else: - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=str(exe_name), - sim_ws=str(function_tmpdir), - ) - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - # create solution and add the model - ims_package = mfims.ModflowIms( - sim, - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.00001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.00001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - sim.register_ims_package(ims_package, [model_name]) - - # add packages to model - dis_package = mfgwfdis.ModflowGwfdis( - model, - length_units="FEET", - nlay=1, - nrow=1, - ncol=10, - delr=500.0, - delc=500.0, - top=100.0, - botm=50.0, - filename=f"{model_name}.dis", - ) - ic_package = mfgwfic.ModflowGwfic( - model, - strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0], - filename=f"{model_name}.ic", - ) - npf_package = mfgwfnpf.ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) - - sto_package = mfgwfsto.ModflowGwfsto( - model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 - ) - - wel_package = mfgwfwel.ModflowGwfwel( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=2, - stress_period_data=[((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)], - ) - wel_package.stress_period_data.add_transient_key(1) - 
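    # add_transient_key registers stress period 1 with the transient list, so
    # the set_data call below stores a period-1 block separate from the
    # period-0 data passed to the constructor above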
wel_package.stress_period_data.set_data([((0, 0, 4), -200.0)], 1) - - drn_package = mfgwfdrn.ModflowGwfdrn( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=1, - stress_period_data=[((0, 0, 0), 80, 60.0)], - ) - - riv_package = mfgwfriv.ModflowGwfriv( - model, - print_input=True, - print_flows=True, - save_flows=True, - maxbound=1, - stress_period_data=[((0, 0, 9), 110, 90.0, 100.0)], - ) - oc_package = mfgwfoc.ModflowGwfoc( - model, - budget_filerecord=[f"{model_name}.cbc"], - head_filerecord=[f"{model_name}.hds"], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - oc_package.saverecord.add_transient_key(1) - oc_package.saverecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) - oc_package.printrecord.add_transient_key(1) - oc_package.printrecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1) - - # write the simulation input files - sim.write_simulation() - - # run the simulation and look for output - success, buff = sim.run_simulation() - assert success - - -@requires_exe("mf6") -def test_get_set_data_record(function_tmpdir): - # names - sim_name = "testrecordsim" - model_name = "testrecordmodel" - exe_name = "mf6" - - # set up simulation - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation( - sim_name=sim_name, - version="mf6", - exe_name=exe_name, - sim_ws=str(function_tmpdir), - ) - tdis_rc = [(10.0, 4, 1.0), (6.0, 3, 1.0)] - tdis = mftdis.ModflowTdis(sim, time_units="DAYS", nper=2, perioddata=tdis_rc) - - # create model instance - model = mfgwf.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - # create solution and add the model - ims_package = mfims.ModflowIms( - sim, - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.00001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.00001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - sim.register_ims_package(ims_package, [model_name]) - - # add packages to model - dis_package = mfgwfdis.ModflowGwfdis( - model, - length_units="FEET", - nlay=3, - nrow=10, - ncol=10, - delr=500.0, - delc=500.0, - top=100.0, - botm=[50.0, 10.0, -50.0], - filename=f"{model_name}.dis", - ) - ic_package = mfgwfic.ModflowGwfic( - model, - strt=[100.0, 90.0, 80.0], - filename=f"{model_name}.ic", - ) - npf_package = mfgwfnpf.ModflowGwfnpf( - model, save_flows=True, icelltype=1, k=50.0, k33=1.0 - ) - - sto_package = mfgwfsto.ModflowGwfsto( - model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15 - ) - # wel packages - period_one = ModflowGwfwel.stress_period_data.empty( - model, - maxbound=3, - aux_vars=["var1", "var2", "var3"], - boundnames=True, - timeseries=True, - ) - period_one[0][0] = ((0, 9, 2), -50.0, -1, -2, -3, None) - period_one[0][1] = ((1, 4, 7), -100.0, 1, 2, 3, "well_1") - period_one[0][2] = ((1, 3, 2), -20.0, 4, 5, 6, "well_2") - period_two = ModflowGwfwel.stress_period_data.empty( - model, - maxbound=2, - aux_vars=["var1", "var2", "var3"], - boundnames=True, - timeseries=True, - ) - period_two[0][0] = ((2, 3, 2), -80.0, 1, 2, 3, "well_2") - period_two[0][1] = ((2, 4, 7), -10.0, 4, 5, 6, "well_1") - stress_period_data = {} - stress_period_data[0] = period_one[0] - stress_period_data[1] = period_two[0] - wel_package = ModflowGwfwel( - model, - print_input=True, - print_flows=True, - auxiliary=[("var1", "var2", "var3")], - maxbound=5, - stress_period_data=stress_period_data, - boundnames=True, - 
save_flows=True, - ) - # rch package - rch_period_list = [] - for row in range(0, 10): - for col in range(0, 10): - rch_amt = (1 + row / 10) * (1 + col / 10) - rch_period_list.append(((0, row, col), rch_amt, 0.5)) - rch_period = {} - rch_period[0] = rch_period_list - rch_package = ModflowGwfrch( - model, - fixed_cell=True, - auxiliary="MULTIPLIER", - auxmultname="MULTIPLIER", - print_input=True, - print_flows=True, - save_flows=True, - maxbound=54, - stress_period_data=rch_period, - ) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # test get_record, set_record for list data - wel = model.get_package("wel") - spd_record = wel.stress_period_data.get_record() - well_sp_1 = spd_record[0] - assert well_sp_1["filename"] == "testrecordmodel.wel_stress_period_data_1.txt" - assert well_sp_1["binary"] is False - assert well_sp_1["data"][0][0] == (0, 9, 2) - assert well_sp_1["data"][0][1] == -50.0 - # modify - del well_sp_1["filename"] - well_sp_1["data"][0][0] = (1, 9, 2) - well_sp_2 = spd_record[1] - del well_sp_2["filename"] - well_sp_2["data"][0][0] = (1, 1, 1) - # save - spd_record[0] = well_sp_1 - spd_record[1] = well_sp_2 - wel.stress_period_data.set_record(spd_record) - # verify changes - spd_record = wel.stress_period_data.get_record() - well_sp_1 = spd_record[0] - assert "filename" not in well_sp_1 - assert well_sp_1["data"][0][0] == (1, 9, 2) - assert well_sp_1["data"][0][1] == -50.0 - well_sp_2 = spd_record[1] - assert "filename" not in well_sp_2 - assert well_sp_2["data"][0][0] == (1, 1, 1) - spd = wel.stress_period_data.get_data() - assert spd[0][0][0] == (1, 9, 2) - # change well_sp_2 back to external - well_sp_2["filename"] = "wel_spd_data_2.txt" - spd_record[1] = well_sp_2 - wel.stress_period_data.set_record(spd_record) - # change well_sp_2 data - spd[1][0][0] = (1, 2, 2) - wel.stress_period_data.set_data(spd) - # verify changes - spd_record = wel.stress_period_data.get_record() - well_sp_2 = spd_record[1] - assert well_sp_2["filename"] == "wel_spd_data_2.txt" - assert well_sp_2["data"][0][0] == (1, 2, 2) - - # test get_data/set_data vs get_record/set_record - dis = model.get_package("dis") - botm = dis.botm.get_record() - assert len(botm) == 3 - layer_2 = botm[1] - layer_3 = botm[2] - # verify layer 2 - assert layer_2["filename"] == "testrecordmodel.dis_botm_layer2.txt" - assert layer_2["binary"] is False - assert layer_2["factor"] == 1.0 - assert layer_2["iprn"] is None - assert layer_2["data"][0][0] == 10.0 - # change and set layer 2 - layer_2["filename"] = "botm_layer2.txt" - layer_2["binary"] = True - layer_2["iprn"] = 3 - layer_2["factor"] = 2.0 - layer_2["data"] = layer_2["data"] * 0.5 - botm[1] = layer_2 - # change and set layer 3 - del layer_3["filename"] - layer_3["factor"] = 0.5 - layer_3["data"] = layer_3["data"] * 2.0 - botm[2] = layer_3 - dis.botm.set_record(botm) - - # get botm in two different ways, verifying changes made - botm_record = dis.botm.get_record() - layer_1 = botm_record[0] - assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" - assert layer_1["binary"] is False - assert layer_1["iprn"] is None - assert layer_1["data"][0][0] == 50.0 - layer_2 = botm_record[1] - assert layer_2["filename"] == "botm_layer2.txt" - assert layer_2["binary"] is True - assert layer_2["factor"] == 2.0 - assert layer_2["iprn"] == 3 - assert layer_2["data"][0][0] == 5.0 - layer_3 = botm_record[2] - assert "filename" not in layer_3 - assert layer_3["factor"] == 0.5 - assert layer_3["data"][0][0] == -100.0 - botm_data = 
dis.botm.get_data(apply_mult=True) - assert botm_data[0][0][0] == 50.0 - assert botm_data[1][0][0] == 10.0 - assert botm_data[2][0][0] == -50.0 - botm_data = dis.botm.get_data() - assert botm_data[0][0][0] == 50.0 - assert botm_data[1][0][0] == 5.0 - assert botm_data[2][0][0] == -100.0 - # modify and set botm data with set_data - botm_data[0][0][0] = 6.0 - botm_data[1][0][0] = -8.0 - botm_data[2][0][0] = -205.0 - dis.botm.set_data(botm_data) - # verify that data changed and metadata did not change - botm_record = dis.botm.get_record() - layer_1 = botm_record[0] - assert layer_1["filename"] == "testrecordmodel.dis_botm_layer1.txt" - assert layer_1["binary"] is False - assert layer_1["iprn"] is None - assert layer_1["data"][0][0] == 6.0 - assert layer_1["data"][0][1] == 50.0 - layer_2 = botm_record[1] - assert layer_2["filename"] == "botm_layer2.txt" - assert layer_2["binary"] is True - assert layer_2["factor"] == 2.0 - assert layer_2["iprn"] == 3 - assert layer_2["data"][0][0] == -8.0 - assert layer_2["data"][0][1] == 5.0 - layer_3 = botm_record[2] - assert "filename" not in layer_3 - assert layer_3["factor"] == 0.5 - assert layer_3["data"][0][0] == -205.0 - botm_data = dis.botm.get_data() - assert botm_data[0][0][0] == 6.0 - assert botm_data[1][0][0] == -8.0 - assert botm_data[2][0][0] == -205.0 - - spd_record = rch_package.stress_period_data.get_record() - assert 0 in spd_record - assert isinstance(spd_record[0], dict) - assert "filename" in spd_record[0] - assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" - assert "binary" in spd_record[0] - assert spd_record[0]["binary"] is False - assert "data" in spd_record[0] - assert spd_record[0]["data"][0][0] == (0, 0, 0) - spd_record[0]["data"][0][0] = (0, 0, 8) - rch_package.stress_period_data.set_record(spd_record) - - spd_data = rch_package.stress_period_data.get_data() - assert spd_data[0][0][0] == (0, 0, 8) - spd_data[0][0][0] = (0, 0, 7) - rch_package.stress_period_data.set_data(spd_data) - - spd_record = rch_package.stress_period_data.get_record() - assert isinstance(spd_record[0], dict) - assert "filename" in spd_record[0] - assert spd_record[0]["filename"] == "testrecordmodel.rch_stress_period_data_1.txt" - assert "binary" in spd_record[0] - assert spd_record[0]["binary"] is False - assert "data" in spd_record[0] - assert spd_record[0]["data"][0][0] == (0, 0, 7) - - sim.write_simulation() - - -@requires_exe("mf6") -def test_output(function_tmpdir, example_data_path): - ex_name = "test001e_UZF_3lay" - sim_ws = example_data_path / "mf6" / ex_name - sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") - sim.set_sim_path(str(function_tmpdir)) - sim.write_simulation() - success, buff = sim.run_simulation() - assert success, f"simulation {sim.name} did not run" - - ml = sim.get_model("gwf_1") - - bud = ml.oc.output.budget() - budcsv = ml.oc.output.budgetcsv() - assert budcsv.file.closed - hds = ml.oc.output.head() - lst = ml.oc.output.list() - - idomain = np.ones(ml.modelgrid.shape, dtype=int) - zonbud = ml.oc.output.zonebudget(idomain) - - assert isinstance(bud, CellBudgetFile) - assert isinstance(budcsv, CsvFile) - assert isinstance(hds, HeadFile) - assert isinstance(zonbud, ZoneBudget6) - assert isinstance(lst, Mf6ListBudget) - - bud = ml.output.budget() - budcsv = ml.output.budgetcsv() - hds = ml.output.head() - zonbud = ml.output.zonebudget(idomain) - lst = ml.output.list() - - assert isinstance(bud, CellBudgetFile) - assert isinstance(budcsv, CsvFile) - assert isinstance(hds, HeadFile) - assert 
isinstance(zonbud, ZoneBudget6) - assert isinstance(lst, Mf6ListBudget) - - uzf = ml.uzf - uzf_bud = uzf.output.budget() - uzf_budcsv = uzf.output.budgetcsv() - conv = uzf.output.package_convergence() - uzf_obs = uzf.output.obs() - uzf_zonbud = uzf.output.zonebudget(idomain) - - assert isinstance(uzf_bud, CellBudgetFile) - assert isinstance(uzf_budcsv, CsvFile) - if conv is not None: - assert isinstance(conv, CsvFile) - assert isinstance(uzf_obs, Mf6Obs) - assert isinstance(uzf_zonbud, ZoneBudget6) - assert ml.dis.output.methods() is None - - -@requires_exe("mf6") -@pytest.mark.slow -def test_output_add_observation(function_tmpdir, example_data_path): - model_name = "lakeex2a" - sim_ws = str(example_data_path / "mf6" / "test045_lake2tr") - sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6") - gwf = sim.get_model(model_name) - - # remove sfr_obs and add a new sfr obs - sfr = gwf.sfr - obs_file = f"{model_name}.sfr.obs" - csv_file = f"{obs_file}.csv" - obs_dict = { - csv_file: [ - ("l08_stage", "stage", (8,)), - ("l09_stage", "stage", (9,)), - ("l14_stage", "stage", (14,)), - ("l15_stage", "stage", (15,)), - ] - } - gwf.sfr.obs.initialize( - filename=obs_file, digits=10, print_input=True, continuous=obs_dict - ) - - sim.set_sim_path(str(function_tmpdir)) - sim.write_simulation() - - success, buff = sim.run_simulation() - assert success, f"simulation {sim.name} did not run" - - # check that .output finds the newly added OBS package - sfr_obs = gwf.sfr.output.obs() - - assert isinstance(sfr_obs, Mf6Obs), ( - "remove and add observation test (Mf6Output) failed" - ) - - -@requires_exe("mf6") -def test_sfr_connections(function_tmpdir, example_data_path): - """MODFLOW just warns if any reaches are unconnected - flopy fails to load model if reach 1 is unconnected, fine with other unconnected - """ - - data_path = example_data_path / "mf6" / "test666_sfrconnections" - sim_ws = function_tmpdir - for test in ["sfr0", "sfr1"]: - sim_name = "test_sfr" - model_name = "test_sfr" - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation( - sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=sim_ws - ) - tdis_rc = [(1.0, 1, 1.0)] - tdis = ModflowTdis(sim, time_units="DAYS", nper=1, perioddata=tdis_rc) - ims_package = ModflowIms( - sim, - pname="my_ims_file", - filename=f"{sim_name}.ims", - print_option="ALL", - complexity="SIMPLE", - ) - model = ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - - dis = ModflowGwfdis( - model, - length_units="FEET", - nlay=1, - nrow=5, - ncol=5, - delr=5000.0, - delc=5000.0, - top=100.0, - botm=-100.0, - filename=f"{model_name}.dis", - ) - ic_package = ModflowGwfic(model, filename=f"{model_name}.ic") - npf_package = ModflowGwfnpf( - model, - pname="npf", - save_flows=True, - alternative_cell_averaging="logarithmic", - icelltype=1, - k=50.0, - ) - - cnfile = f"mf6_{test}_connection.txt" - pkfile = f"mf6_{test}_package.txt" - - with open(data_path / pkfile, "r") as f: - nreaches = len(f.readlines()) - sfr = ModflowGwfsfr( - model, - packagedata={"filename": str(data_path / pkfile)}, - connectiondata={"filename": str(data_path / cnfile)}, - nreaches=nreaches, - pname="sfr", - unit_conversion=86400, - ) - sim.set_all_data_external() - sim.write_simulation() - success, buff = sim.run_simulation() - assert success, f"simulation {sim.name} did not run" - - # reload simulation - sim2 = MFSimulation.load(sim_ws=sim_ws) - sim2.set_all_data_external() - sim2.write_simulation() - success, buff = sim2.run_simulation() - assert success, f"simulation 
{sim2.name} did not run after being reloaded" - - # test sfr recarray data - model2 = sim2.get_model() - sfr2 = model2.get_package("sfr") - sfr_pd = sfr2.packagedata - rec_data = [ - (0, 0, 0, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 1, 1.0, 0), - (1, 0, 1, 0, 1.0, 1.0, 0.01, 10.0, 1.0, 1.0, 1.0, 2, 1.0, 0), - ] - rec_type = [ - ("ifno", int), - ("layer", int), - ("row", int), - ("column", int), - ("rlen", float), - ("rwid", float), - ("rgrd", float), - ("rtp", float), - ("rbth", float), - ("rhk", float), - ("man", float), - ("nconn", int), - ("ustrf", float), - ("nvd", int), - ] - pkg_data = np.rec.array(rec_data, rec_type) - sfr_pd.set_record({"data": pkg_data}) - data = sfr_pd.get_data() - assert data[0][1] == (0, 0, 0) - - -@requires_exe("mf6") -def test_array(function_tmpdir): - # get_data - # empty data in period block vs data repeating - # array - # aux values, test that they work the same as other arrays (is a value - # of zero always used even if aux is defined in a previous stress - # period?) - - sim_name = "test_array" - model_name = "test_array" - out_dir = function_tmpdir - tdis_name = f"{sim_name}.tdis" - sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir) - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)] - tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc) - ims_package = ModflowIms( - sim, - pname="my_ims_file", - filename=f"{sim_name}.ims", - print_option="ALL", - complexity="SIMPLE", - outer_dvclose=0.0001, - outer_maximum=50, - under_relaxation="NONE", - inner_maximum=30, - inner_dvclose=0.0001, - linear_acceleration="CG", - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2, - ) - model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam") - - dis = ModflowGwfdis( - model, - length_units="FEET", - nlay=4, - nrow=2, - ncol=2, - delr=5000.0, - delc=5000.0, - top=100.0, - botm=[50.0, 0.0, -50.0, -100.0], - filename=f"{model_name} 1.dis", - ) - ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic") - npf_package = ModflowGwfnpf( - model, - pname="npf_1", - save_flows=True, - alternative_cell_averaging="logarithmic", - icelltype=1, - k=50.0, - ) - - oc_package = ModflowGwfoc( - model, - budget_filerecord=[("test_array.cbc",)], - head_filerecord=[("test_array.hds",)], - saverecord={ - 0: [("HEAD", "ALL"), ("BUDGET", "ALL")], - 1: [], - }, - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]} - irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]} - rcha = ModflowGwfrcha( - model, - print_input=True, - print_flows=True, - auxiliary=[("var1", "var2")], - irch=irch, - recharge={1: 0.0001, 2: 0.00001}, - aux=aux, - ) - val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) - assert val_irch[0] == 4 - assert val_irch[1] == 5 - assert val_irch[2] == 6 - assert val_irch[3] == 6 - val_irch_2 = rcha.irch.get_data() - assert val_irch_2[0] is None - assert val_irch_2[1][1, 1] == 1 - assert val_irch_2[2][1, 1] == 3 - assert val_irch_2[3] is None - val_irch_2_3 = rcha.irch.get_data(3) - assert val_irch_2_3 is None - val_rch = rcha.recharge.array.sum(axis=(1, 2, 3)) - assert val_rch[0] == 0.0 - assert val_rch[1] == 0.0004 - assert val_rch[2] == 0.00004 - assert val_rch[3] == 0.00004 - val_rch_2 = rcha.recharge.get_data() - assert val_rch_2[0] is None - assert val_rch_2[1][0, 0] == 0.0001 - assert val_rch_2[2][0, 0] == 0.00001 - assert val_rch_2[3] is None - aux_data_0 = rcha.aux.get_data(0) - 
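    # aux was given only for stress periods 1 and 3 (see the aux dict above),
    # so get_data returns None for periods 0 and 2 rather than repeating the
    # previous period's arrays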
assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-    welspdict = {1: [[(0, 0, 0), 0.25, 0.0]], 2: [[(0, 0, 0), 0.1, 0.0]]}
-    wel = ModflowGwfwel(
-        model,
-        print_input=True,
-        print_flows=True,
-        mover=True,
-        stress_period_data=welspdict,
-        save_flows=False,
-        auxiliary="CONCENTRATION",
-        pname="WEL-1",
-    )
-    wel_array = wel.stress_period_data.array
-    #print(type(wel.stress_period_data))
-    #print(type(wel.stress_period_data.array))
-    assert wel_array[0] is None
-    assert wel_array[1][0][1] == 0.25
-    assert wel_array[2][0][1] == 0.1
-    assert wel_array[3][0][1] == 0.1
-
-    drnspdict = {
-        0: [[(0, 0, 0), 60.0, 10.0]],
-        2: [],
-        3: [[(0, 0, 0), 55.0, 5.0]],
-    }
-    drn = ModflowGwfdrn(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=drnspdict,
-        save_flows=False,
-        pname="DRN-1",
-    )
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 = drn.stress_period_data.get_data(3)
-    assert drn_gd_3[0][1] == 55.0
-
-    ghbspdict = {
-        0: [[(0, 1, 1), 60.0, 10.0]],
-    }
-    ghb = ModflowGwfghb(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=ghbspdict,
-        save_flows=False,
-        pname="GHB-1",
-    )
-
-    lakpd = [(0, 70.0, 1), (1, 65.0, 1)]
-    lakecn = [
-        (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-        (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-    ]
-    lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")]
-    lak = ModflowGwflak(
-        model,
-        pname="lak",
-        print_input=True,
-        mover=True,
-        nlakes=2,
-        noutlets=0,
-        ntables=1,
-        packagedata=lakpd,
-        connectiondata=lakecn,
-        tables=lak_tables,
-    )
-
-    table_01 = [
-        (30.0, 100000.0, 10000.0),
-        (40.0, 200500.0, 10100.0),
-        (50.0, 301200.0, 10130.0),
-        (60.0, 402000.0, 10180.0),
-        (70.0, 503000.0, 10200.0),
-        (80.0, 700000.0, 20000.0),
-    ]
-    lak_tab = ModflowUtllaktab(
-        model,
-        filename="lak01.tab",
-        nrow=6,
-        ncol=3,
-        table=table_01,
-    )
-
-    table_02 = [
-        (40.0, 100000.0, 10000.0),
-        (50.0, 200500.0, 10100.0),
-        (60.0, 301200.0, 10130.0),
-        (70.0, 402000.0, 10180.0),
-        (80.0, 503000.0, 10200.0),
-        (90.0, 700000.0, 20000.0),
-    ]
-    lak_tab_2 = ModflowUtllaktab(
-        model,
-        filename="lak02.tab",
-        nrow=6,
-        ncol=3,
-        table=table_02,
-    )
-    wel_name_1 = wel.name[0]
-    lak_name_2 = lak.name[0]
-    package_data = [(wel_name_1,), (lak_name_2,)]
-    period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)]
-    fname = f"{model.name}.input.mvr"
-    mvr = ModflowGwfmvr(
-        parent_model_or_package=model,
-        filename=fname,
-        print_input=True,
-        print_flows=True,
-        maxpackages=2,
-        maxmvr=1,
-        packages=package_data,
-        perioddata=period_data,
-    )
-
-    # test writing and loading model
-    sim.write_simulation()
-    sim.run_simulation()
-
-    test_sim = MFSimulation.load(
-        sim_name,
-        "mf6",
-        "mf6",
-        out_dir,
-        write_headers=False,
-    )
-    model = test_sim.get_model()
-    dis = model.get_package("dis")
-    rcha = model.get_package("rcha")
-    wel = model.get_package("wel")
-    drn = model.get_package("drn")
-    lak = model.get_package("lak")
-    lak_tab = model.get_package("laktab")
-    assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis"
-    # do same tests as above
-    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
-    assert val_irch[0] == 4
-    assert val_irch[1] == 5
-    assert val_irch[2] == 6
-    assert val_irch[3] == 6
-    val_irch_2 = rcha.irch.get_data()
-    assert val_irch_2[0] is None
-    assert val_irch_2[1][1, 1] == 1
-    assert val_irch_2[2][1, 1] == 3
-    assert val_irch_2[3] is None
-    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
-    assert val_rch[0] == 0.0
-    assert val_rch[1] == 0.0004
-    assert val_rch[2] == 0.00004
-    assert val_rch[3] == 0.00004
-    val_rch_2 = rcha.recharge.get_data()
-    assert val_rch_2[0] is None
-    assert val_rch_2[1][0, 0] == 0.0001
-    assert val_rch_2[2][0, 0] == 0.00001
-    assert val_rch_2[3] is None
-    aux_data_0 = rcha.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-    wel_array = wel.stress_period_data.array
-    assert wel_array[0] is None
-    assert wel_array[1][0][1] == 0.25
-    assert wel_array[2][0][1] == 0.1
-    assert wel_array[3][0][1] == 0.1
-
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 = drn.stress_period_data.get_data(3)
-    assert drn_gd_3[0][1] == 55.0
-
-    lak_tab_array = lak.tables.get_data()
-    assert lak_tab_array[0][1] == "lak01.tab"
-    assert lak_tab_array[1][1] == "lak02.tab"
-
-    assert len(lak_tab) == 2
-    lak_tab_1 = lak_tab[0].table.get_data()
-    assert lak_tab_1[0][0] == 30.0
-    assert lak_tab_1[5][2] == 20000.0
-    lak_tab_2 = lak_tab[1].table.get_data()
-    assert lak_tab_2[0][0] == 40.0
-    assert lak_tab_2[4][1] == 503000.0
-
-
-@requires_exe("mf6")
-def test_grid_array(function_tmpdir):
-    # get_data
-    # empty data in period block vs data repeating
-    # array
-    # aux values, test that they work the same as other arrays (is a value
-    # of zero always used even if aux is defined in a previous stress
-    # period?)
-
-    sim_name = "test_grid_array"
-    model_name = "test_grid_array"
-    out_dir = function_tmpdir
-    tdis_name = f"{sim_name}.tdis"
-    sim = MFSimulation(sim_name=sim_name, version="mf6", exe_name="mf6", sim_ws=out_dir)
-    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
-    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
-    ims_package = ModflowIms(
-        sim,
-        pname="my_ims_file",
-        filename=f"{sim_name}.ims",
-        print_option="ALL",
-        complexity="SIMPLE",
-        outer_dvclose=0.0001,
-        outer_maximum=50,
-        under_relaxation="NONE",
-        inner_maximum=30,
-        inner_dvclose=0.0001,
-        linear_acceleration="CG",
-        preconditioner_levels=7,
-        preconditioner_drop_tolerance=0.01,
-        number_orthogonalizations=2,
-    )
-    model = ModflowGwf(sim, modelname=model_name, model_nam_file=f"{model_name}.nam")
-
-    dis = ModflowGwfdis(
-        model,
-        length_units="FEET",
-        nlay=4,
-        nrow=2,
-        ncol=2,
-        delr=5000.0,
-        delc=5000.0,
-        top=100.0,
-        botm=[50.0, 0.0, -50.0, -100.0],
-        filename=f"{model_name} 1.dis",
-    )
-    ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
-    npf_package = ModflowGwfnpf(
-        model,
-        pname="npf_1",
-        save_flows=True,
-        alternative_cell_averaging="logarithmic",
-        icelltype=1,
-        k=50.0,
-    )
-
-    oc_package = ModflowGwfoc(
-        model,
-        budget_filerecord=[("test_array.cbc",)],
-        head_filerecord=[("test_array.hds",)],
-        saverecord={
-            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
-            1: [],
-        },
-        printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
-    )
-
-    aux = {1: [[50.0], [1.3]], 3: [[200.0], [1.5]]}
-    irch = {1: [[0, 2], [2, 1]], 2: [[0, 1], [2, 3]]}
-    rcha = ModflowGwfrcha(
-        model,
-        print_input=True,
-        print_flows=True,
-        auxiliary=[("var1", "var2")],
-        irch=irch,
-        recharge={1: 0.0001, 2: 0.00001},
-        aux=aux,
-    )
-    print(f"RENO RCHA")
-    print(np.shape(rcha.recharge.array))
-    print(rcha.recharge.array)
-    print(rcha.recharge.get_data())
-    print(f"RENO RCHA AUX")
-    print(rcha.aux.get_data(0))
-    print(rcha.aux.get_data(1))
-    print(rcha.aux.get_data(2))
-    print(rcha.aux.get_data(3))
-    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
-    assert val_irch[0] == 4
-    assert val_irch[1] == 5
-    assert val_irch[2] == 6
-    assert val_irch[3] == 6
-    val_irch_2 = rcha.irch.get_data()
-    assert val_irch_2[0] is None
-    assert val_irch_2[1][1, 1] == 1
-    assert val_irch_2[2][1, 1] == 3
-    assert val_irch_2[3] is None
-    val_irch_2_3 = rcha.irch.get_data(3)
-    assert val_irch_2_3 is None
-    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
-    assert val_rch[0] == 0.0
-    assert val_rch[1] == 0.0004
-    assert val_rch[2] == 0.00004
-    assert val_rch[3] == 0.00004
-    val_rch_2 = rcha.recharge.get_data()
-    assert val_rch_2[0] is None
-    assert val_rch_2[1][0, 0] == 0.0001
-    assert val_rch_2[2][0, 0] == 0.00001
-    assert val_rch_2[3] is None
-    aux_data_0 = rcha.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-#    welspdict = {1: [[(0, 0, 0), 0.25, 0.0]], 2: [[(0, 0, 0), 0.1, 0.0]]}
-#    wel = ModflowGwfwel(
-#        model,
-#        print_input=True,
-#        print_flows=True,
-#        mover=True,
-#        stress_period_data=welspdict,
-#        save_flows=False,
-#        auxiliary="CONCENTRATION",
-#        pname="WEL-1",
-#    )
-#    wel_array = wel.stress_period_data.array
-#    assert wel_array[0] is None
-#    assert wel_array[1][0][1] == 0.25
-#    assert wel_array[2][0][1] == 0.1
-#    assert wel_array[3][0][1] == 0.1
-
-    nlay = dis.nlay.get_data()
-    nrow = dis.nrow.get_data()
-    ncol = dis.ncol.get_data()
-
-    DNODATA = 3.0e30  # MF6 DNODATA constant
-    welqspd = {}
-    welconcspd = {}
-    for n in range(2):
-        #for n in range(4):
-        q = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
-        #if n == 0:
-        if n == 1:
-            q[0, 0, 0] = 0.25
-            welconc[0, 0, 0] = 0.0
-            welaux2[0, 0, 0] = 9.0
-        #elif n == 1:
-        elif n == 2:
-            q[0, 0, 0] = 0.1
-            welconc[0, 0, 0] = 0.0
-            welaux2[0, 0, 0] = 9.0
-        #welqspd[n + 1] = q
-        #welconcspd[n + 1] = [welconc]
-        welqspd[n] = q
-        welconcspd[n] = [welconc, welaux2]
-
-    # first create test package with multiple auxvars
-    wel = ModflowGwfwelg(
-        model,
-        print_input=True,
-        print_flows=True,
-        mover=True,
-        save_flows=False,
-        auxiliary=["var1", "var2"],
-        pname="WEL-1",
-        q=welqspd,
-        #aux={1: [[50.0], [1.3]], 3: [[200.0], [1.5]]},
-        aux=welconcspd,
-    )
-
-    print(wel.q.array)
-    print("FULLONE:")
-    print(wel.q.get_data())
-    print("FULLTWO:")
-    print(wel.aux.array)
-    print("FULLONEAUX:")
-    print(wel.aux.get_data())
-    print("FULLTWOAUX:")
-    aux = wel.aux.array
-    print(type(aux))
-    print(dir(aux))
-    print(np.shape(aux))
-    print("1")
-    print(wel.aux.array[1, 0])
-    print("2")
-    print(wel.aux.array[1, 1])
-    assert np.allclose(wel.aux.array[0], wel.aux.get_data(0)[0])
-    assert np.allclose(wel.aux.array[1], wel.aux.get_data(0)[1])
-    assert np.allclose(wel.aux.array[2], wel.aux.get_data(1)[0])
-    assert np.allclose(wel.aux.array[3], wel.aux.get_data(1)[1])
-    assert np.allclose(wel.aux.array[4], wel.aux.get_data(2)[0])
-    assert np.allclose(wel.aux.array[5], wel.aux.get_data(2)[1])
-    assert np.allclose(wel.aux.array[6], wel.aux.get_data(3)[0])
-    assert np.allclose(wel.aux.array[7], wel.aux.get_data(3)[1])
-
-    assert False
-    assert wel.q.get_data()[0] is None
-    assert wel.q.get_data(0) is None
-    assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1))
-    assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2))
-    assert len(wel.q.array) == 4
-    print(wel.q.array[1])
-    print("ONE:")
-    print(wel.q.get_data(1))
-    print("TWO:")
-    print(wel.q.array)
-    print("FULLONE:")
-    print(wel.q.get_data())
-    print("FULLTWO:")
-    assert np.allclose(wel.q.array[1], wel.q.get_data(1))
-    assert np.allclose(wel.q.array[2], wel.q.get_data(2))
-    assert wel.q.get_data()[3] is None
-    assert wel.q.get_data(3) is None
-
-    sim.write_simulation()
-    assert False
-    print("RENO WELG")
-    #print(welqspd)
-    assert not wel.has_stress_period_data
-    print(wel.q.array)
-    print(wel.q.get_data())
-    print(np.shape(wel.q.array))
-    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
-    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
-    assert val_q[0] == 0.0
-    assert val_q[1] == 0.25
-    assert val_q[2] == 0.1
-    assert val_q[3] == 0.1
-    val_q_2 = wel.q.get_data()
-    assert val_q_2[0] is None
-    assert val_q_2[1][0, 0, 0] == 0.25
-    assert val_q_2[2][0, 0, 0] == 0.1
-    assert val_q_2[3] is None
-    print("RENO WELG AUX")
-    print(wel.aux.array)
-    print(wel.aux.get_data(0))
-    print(wel.aux.get_data(1))
-    print(wel.aux.get_data(2))
-    print(wel.aux.get_data(3))
-    print(wel.aux.array)
-    aux_data_0 = wel.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = wel.aux.get_data(1)
-    print(aux_data_1)
-    assert aux_data_1[0][0][0][0] == 50.0
-    assert aux_data_1[1][0][0][0] == 1.3
-    aux_data_2 = wel.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = wel.aux.get_data(3)
-    assert aux_data_3[0][0][0][0] == 200.0
-    assert aux_data_3[1][0][0][0] == 1.5
-
-    #print(wel.stress_period_data)
-    for k in wel.q._data_storage.keys():
-        print(f"RENO k={k}")
-        wel.q.get_data_prep(k)
-        print(wel.q._data_storage[k].get_data())
-
-    print(type(wel.q))
-    print(dir(wel.q))
-    print(type(wel.q[0]))
-    print(dir(wel.q[0]))
-    #assert wel.q[0] is None
-    #assert wel.q[1[0][1] == 0.25
-
-    # remove test wel package
-    wel.remove()
-
-    # create welg package
-    wel = ModflowGwfwelg(
-        model,
-        print_input=True,
-        print_flows=True,
-        mover=True,
-        save_flows=False,
-        auxiliary=["CONCENTRATION"],
-        pname="WEL-1",
-        q=welqspd,
-        aux=welconcspd,
-    )
-
-    print("RENO WELG 2")
-    print(wel)
-    assert not wel.has_stress_period_data
-    print(wel.q.array)
-    print(wel.q.get_data())
-    print(np.shape(wel.q.array))
-    q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array)
-    val_q = np.nansum(q_nan, axis=(1, 2, 3, 4))
-    assert val_q[0] == 0.0
-    assert val_q[1] == 0.25
-    assert val_q[2] == 0.1
-    assert val_q[3] == 0.1
-    val_q_2 = wel.q.get_data()
-    assert val_q_2[0] is None
-    assert val_q_2[1][0, 0, 0] == 0.25
-    assert val_q_2[2][0, 0, 0] == 0.1
-    assert val_q_2[3] is None
-    print("RENO WELG AUX 2")
-    #print(welconcspd)
-    print(wel.aux.get_data())
-    print(wel.aux.array)
-    print(wel.aux.array)
-    print(wel.aux.get_data(0))
-    print(wel.aux.array)
-    print(wel.aux.get_data(1))
-    print(wel.aux.get_data(2))
-    print(wel.aux.get_data(3))
-    print(wel.aux.array)
-    aux_data_0 = wel.aux.get_data(0)
-    print(wel.aux.array)
-    assert aux_data_0 is None
-    aux_data_1 = wel.aux.get_data(1)
-    print(aux_data_1)
-    assert aux_data_1[0][0][0][0] == 0.0
-    assert aux_data_1[0][0, 0, 0] == 0.0
-    aux_data_2 = wel.aux.get_data(2)
-    #assert aux_data_2[0][0][0] == 0.0
-    assert aux_data_2[0][0, 0, 0] == 0.0
-    aux_data_3 = wel.aux.get_data(3)
-    assert aux_data_3 is None
-
-    print(wel.aux.get_data())
-    print(wel.aux.array)
-    #assert False
-
-    drnspdict = {
-        0: [[(0, 0, 0), 60.0, 10.0]],
-        2: [],
-        3: [[(0, 0, 0), 55.0, 5.0]],
-    }
-    drn = ModflowGwfdrn(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=drnspdict,
-        save_flows=False,
-        pname="DRN-1",
-    )
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 = drn.stress_period_data.get_data(3)
-    assert drn_gd_3[0][1] == 55.0
-
-    ghbspdict = {
-        0: [[(0, 1, 1), 60.0, 10.0]],
-    }
-    ghb = ModflowGwfghb(
-        model,
-        print_input=True,
-        print_flows=True,
-        stress_period_data=ghbspdict,
-        save_flows=False,
-        pname="GHB-1",
-    )
-
-    lakpd = [(0, 70.0, 1), (1, 65.0, 1)]
-    lakecn = [
-        (0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-        (1, 0, (0, 1, 1), "HORIZONTAL", 1.0, 60.0, 90.0, 10.0, 1.0),
-    ]
-    lak_tables = [(0, "lak01.tab"), (1, "lak02.tab")]
-    lak = ModflowGwflak(
-        model,
-        pname="lak",
-        print_input=True,
-        mover=True,
-        nlakes=2,
-        noutlets=0,
-        ntables=1,
-        packagedata=lakpd,
-        connectiondata=lakecn,
-        tables=lak_tables,
-    )
-
-    table_01 = [
-        (30.0, 100000.0, 10000.0),
-        (40.0, 200500.0, 10100.0),
-        (50.0, 301200.0, 10130.0),
-        (60.0, 402000.0, 10180.0),
-        (70.0, 503000.0, 10200.0),
-        (80.0, 700000.0, 20000.0),
-    ]
-    lak_tab = ModflowUtllaktab(
-        model,
-        filename="lak01.tab",
-        nrow=6,
-        ncol=3,
-        table=table_01,
-    )
-
-    table_02 = [
-        (40.0, 100000.0, 10000.0),
-        (50.0, 200500.0, 10100.0),
-        (60.0, 301200.0, 10130.0),
-        (70.0, 402000.0, 10180.0),
-        (80.0, 503000.0, 10200.0),
-        (90.0, 700000.0, 20000.0),
-    ]
-    lak_tab_2 = ModflowUtllaktab(
-        model,
-        filename="lak02.tab",
-        nrow=6,
-        ncol=3,
-        table=table_02,
-    )
-    wel_name_1 = wel.name[0]
-    lak_name_2 = lak.name[0]
-    package_data = [(wel_name_1,), (lak_name_2,)]
-    period_data = [(wel_name_1, 0, lak_name_2, 0, "FACTOR", 1.0)]
-    fname = f"{model.name}.input.mvr"
-    mvr = ModflowGwfmvr(
-        parent_model_or_package=model,
-        filename=fname,
-        print_input=True,
-        print_flows=True,
-        maxpackages=2,
-        maxmvr=1,
-        packages=package_data,
-        perioddata=period_data,
-    )
-
-    # test writing and loading model
-    print(wel.aux.array)
-    sim.write_simulation()
-    print(wel.aux.array)
-    sim.run_simulation()
-    print(wel.aux.array)
-
-    test_sim = MFSimulation.load(
-        sim_name,
-        "mf6",
-        "mf6",
-        out_dir,
-        write_headers=False,
-    )
-    model = test_sim.get_model()
-    dis = model.get_package("dis")
-    rcha = model.get_package("rcha")
-    wel = model.get_package("wel")
-    drn = model.get_package("drn")
-    lak = model.get_package("lak")
-    lak_tab = model.get_package("laktab")
-    assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis"
-    # do same tests as above
-    val_irch = rcha.irch.array.sum(axis=(1, 2, 3))
-    assert val_irch[0] == 4
-    assert val_irch[1] == 5
-    assert val_irch[2] == 6
-    assert val_irch[3] == 6
-    val_irch_2 = rcha.irch.get_data()
-    assert val_irch_2[0] is None
-    assert val_irch_2[1][1, 1] == 1
-    assert val_irch_2[2][1, 1] == 3
-    assert val_irch_2[3] is None
-    val_rch = rcha.recharge.array.sum(axis=(1, 2, 3))
-    assert val_rch[0] == 0.0
-    assert val_rch[1] == 0.0004
-    assert val_rch[2] == 0.00004
-    assert val_rch[3] == 0.00004
-    val_rch_2 = rcha.recharge.get_data()
-    assert val_rch_2[0] is None
-    assert val_rch_2[1][0, 0] == 0.0001
-    assert val_rch_2[2][0, 0] == 0.00001
-    assert val_rch_2[3] is None
-    aux_data_0 = rcha.aux.get_data(0)
-    assert aux_data_0 is None
-    aux_data_1 = rcha.aux.get_data(1)
-    assert aux_data_1[0][0][0] == 50.0
-    aux_data_2 = rcha.aux.get_data(2)
-    assert aux_data_2 is None
-    aux_data_3 = rcha.aux.get_data(3)
-    assert aux_data_3[0][0][0] == 200.0
-
-    # RENO TODO
-    #wel_array = wel.stress_period_data.array
-    #assert wel_array[0] is None
-    #assert wel_array[1][0][1] == 0.25
-    #assert wel_array[2][0][1] == 0.1
-    #assert wel_array[3][0][1] == 0.1
-    welg_q_per = wel.q.get_data()
-    #print(welg_q_per)
-    #print(wel.aux.get_data())
-    print(wel.q.array)
-    print(wel.aux.array)
-    return
-    assert welg_q_per[0] == None
-    assert welg_q_per[1][0, 0, 0] == 0.25
-    assert welg_q_per[2][0, 0, 0] == 0.1
-    assert welg_q_per[3][0, 0, 0] == 0.1
-
-    welg_q_per1 = wel.q.get_data(1)
-    print(wel.q.array)
-    assert welg_q_per1[0, 0, 0] == 0.25
-    welg_aux_per1 = wel.aux.get_data(1)
-    assert welg_aux_per1[0][0, 0, 0] == 0.0
-
-    drn_array = drn.stress_period_data.array
-    assert drn_array[0][0][1] == 60.0
-    assert drn_array[1][0][1] == 60.0
-    assert drn_array[2] is None
-    assert drn_array[3][0][1] == 55.0
-    drn_gd_0 = drn.stress_period_data.get_data(0)
-    assert drn_gd_0[0][1] == 60.0
-    drn_gd_1 = drn.stress_period_data.get_data(1)
-    assert drn_gd_1 is None
-    drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert len(drn_gd_2) == 0
-    drn_gd_3 = drn.stress_period_data.get_data(3)
-    assert drn_gd_3[0][1] == 55.0
-
-    lak_tab_array = lak.tables.get_data()
-    assert lak_tab_array[0][1] == "lak01.tab"
-    assert lak_tab_array[1][1] == "lak02.tab"
-
-    assert len(lak_tab) == 2
-    lak_tab_1 = lak_tab[0].table.get_data()
-    assert lak_tab_1[0][0] == 30.0
-    assert lak_tab_1[5][2] == 20000.0
-    lak_tab_2 = lak_tab[1].table.get_data()
-    assert lak_tab_2[0][0] == 40.0
-    assert lak_tab_2[4][1] == 503000.0
-
-
-@requires_exe("mf6")
-def test_multi_model(function_tmpdir):
-    # init paths
-    test_ex_name = "test_multi_model"
-    model_names = ["gwf_model_1", "gwf_model_2", "gwt_model_1", "gwt_model_2"]
-
-    # temporal discretization
-    nper = 1
-    perlen = [5.0]
-    nstp = [200]
-    tsmult = [1.0]
-    tdis_rc = []
-    for i in range(nper):
-        tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
-
-    # build MODFLOW 6 files
-    sim = MFSimulation(
-        sim_name=test_ex_name,
-        version="mf6",
-        exe_name="mf6",
-        sim_ws=str(function_tmpdir),
-    )
-    # create tdis package
-    tdis = ModflowTdis(
-        sim, time_units="DAYS", nper=nper, perioddata=tdis_rc, pname="sim.tdis"
-    )
-
-    # grid information
-    nlay, nrow, ncol = 1, 1, 50
-
-    # Create gwf1 model
-    welspd = {0: [[(0, 0, 0), 1.0, 1.0]]}
-    chdspd = None
-    gwf1 = get_gwf_model(
-        sim,
-        model_names[0],
-        model_names[0],
-        (nlay, nrow, ncol),
-        chdspd=chdspd,
-        welspd=welspd,
-    )
-
-    # Create gwf2 model
-    welspd = {0: [[(0, 0, 1), 0.5, 0.5]]}
-    chdspd = {0: [[(0, 0, ncol - 1), 0.0000000]]}
-    gwf2 = get_gwf_model(
-        sim,
-        model_names[1],
-        model_names[1],
-        (nlay, nrow, ncol),
-        chdspd=chdspd,
-        welspd=welspd,
-    )
-    lakpd = [(0, -100.0, 1)]
-    lakecn = [(0, 0, (0, 0, 0), "HORIZONTAL", 1.0, 0.1, 1.0, 10.0, 1.0)]
-    lak_2 = ModflowGwflak(
-        gwf2,
-        pname="lak2",
-        print_input=True,
-        mover=True,
-        nlakes=1,
-        noutlets=0,
-        ntables=0,
-        packagedata=lakpd,
-        connectiondata=lakecn,
-    )
-
-    # gwf-gwf
-    gwfgwf_data = []
-    for col in range(0, ncol):
-        gwfgwf_data.append([(0, 0, col), (0, 0, col), 1, 0.5, 0.5, 1.0, 0.0, 1.0])
-    gwfgwf = ModflowGwfgwf(
-        sim,
-        exgtype="GWF6-GWF6",
-        nexg=len(gwfgwf_data),
-        exgmnamea=gwf1.name,
-        exgmnameb=gwf2.name,
-        exchangedata=gwfgwf_data,
-        auxiliary=["ANGLDEGX", "CDIST"],
-        filename="flow1_flow2.gwfgwf",
-    )
-    # set up mvr package
-    wel_1 = gwf1.get_package("wel")
-    wel_1.mover.set_data(True)
-    wel_name_1 = wel_1.name[0]
-    lak_name_2 = lak_2.name[0]
-    package_data = [(gwf1.name, wel_name_1), (gwf2.name, lak_name_2)]
-    period_data = [(gwf1.name, wel_name_1, 0, gwf2.name, lak_name_2, 0, "FACTOR", 1.0)]
-    fname = "gwfgwf.input.mvr"
-    gwfgwf.mvr.initialize(
-        filename=fname,
-        modelnames=True,
-        print_input=True,
-        print_flows=True,
-        maxpackages=2,
-        maxmvr=1,
-        packages=package_data,
-        perioddata=period_data,
-    )
-
-    gnc_data = []
-    for col in range(0, ncol):
-        if col < ncol / 2.0:
-            gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col + 1), 0.25))
-        else:
-            gnc_data.append(((0, 0, col), (0, 0, col), (0, 0, col - 1), 0.25))
-
-    # set up gnc package
-    fname = "gwfgwf.input.gnc"
-    gwfgwf.gnc.initialize(
-        filename=fname,
-        print_input=True,
-        print_flows=True,
-        numgnc=ncol,
-        numalphaj=1,
-        gncdata=gnc_data,
-    )
-
-    # Observe flow for exchange
-    gwfgwfobs = {}
-    obs_list = []
-    for col in range(0, ncol):
-        obs_list.append([f"exchange_flow_{col}", "FLOW-JA-FACE", (col,)])
-    gwfgwfobs["gwfgwf.output.obs.csv"] = obs_list
-    fname = "gwfgwf.input.obs"
-    gwfgwf.obs.initialize(
-        filename=fname, digits=25, print_input=True, continuous=gwfgwfobs
-    )
-
-    # Create gwt model
-    sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")]
-    gwt = get_gwt_model(
-        sim,
-        model_names[2],
-        model_names[2],
-        (nlay, nrow, ncol),
-        sourcerecarray=sourcerecarray,
-    )
-
-    # GWF GWT exchange
-    gwfgwt = ModflowGwfgwt(
-        sim,
-        exgtype="GWF6-GWT6",
-        exgmnamea=model_names[0],
-        exgmnameb=model_names[2],
-        filename="flow1_transport1.gwfgwt",
-    )
-
-    # solver settings
-    nouter, ninner = 100, 300
-    hclose, rclose, relax = 1e-6, 1e-6, 1.0
-
-    # create iterative model solution and register the gwf model with it
-    imsgwf = ModflowIms(
-        sim,
-        print_option="SUMMARY",
-        outer_dvclose=hclose,
-        outer_maximum=nouter,
-        under_relaxation="NONE",
-        inner_maximum=ninner,
-        inner_dvclose=hclose,
-        rcloserecord=rclose,
-        linear_acceleration="BICGSTAB",
-        scaling_method="NONE",
-        reordering_method="NONE",
-        relaxation_factor=relax,
-        filename="flow.ims",
-    )
-
-    # create iterative model solution and register the gwt model with it
-    imsgwt = ModflowIms(
-        sim,
-        print_option="SUMMARY",
-        outer_dvclose=hclose,
-        outer_maximum=nouter,
-        under_relaxation="NONE",
-        inner_maximum=ninner,
-        inner_dvclose=hclose,
-        rcloserecord=rclose,
-        linear_acceleration="BICGSTAB",
-        scaling_method="NONE",
-        reordering_method="NONE",
-        relaxation_factor=relax,
-        filename="transport.ims",
-    )
-    sim.register_ims_package(imsgwt, [gwt.name])
-
-    sim.write_simulation()
-    sim.run_simulation()
-
-    # reload simulation
-    sim2 = MFSimulation.load(sim_ws=str(function_tmpdir))
-
-    # check ims registration
-    solution_recarray = sim2.name_file.solutiongroup
-    for solution_group_num in solution_recarray.get_active_key_list():
-        rec_array = solution_recarray.get_data(solution_group_num[0])
-    assert rec_array[0][1] == "flow.ims"
-    assert rec_array[0][2] == model_names[0]
-    assert rec_array[0][3] == model_names[1]
-    assert rec_array[1][1] == "transport.ims"
-    assert rec_array[1][2] == model_names[2]
-    assert gwf1.get_ims_package() is gwf2.get_ims_package()
-    assert gwf1.get_ims_package().filename == "flow.ims"
-    assert gwt.get_ims_package().filename == "transport.ims"
-    # test ssm fileinput
-    gwt2 = sim2.get_model("gwt_model_1")
-    ssm2 = gwt2.get_package("ssm")
-    fileinput = [
-        ("RCH-1", "gwt_model_1.rch1.spc"),
-        ("RCH-2", "gwt_model_1.rch2.spc"),
-        ("RCH-3", "gwt_model_1.rch3.spc", "MIXED"),
-        ("RCH-4", "gwt_model_1.rch4.spc"),
-    ]
-    ssm2.fileinput = fileinput
-    fi_out = ssm2.fileinput.get_data()
-    assert fi_out[2][1] == "gwt_model_1.rch3.spc"
-    assert fi_out[1][2] is None
-    assert fi_out[2][2] == "MIXED"
-
-    spca1 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch1.spc", print_input=True)
-    spca2 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch2.spc", print_input=False)
-    spca3 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch3.spc", print_input=True)
-    spca4 = ModflowUtlspca(gwt2, filename="gwt_model_1.rch4.spc", print_input=True)
-
-    # test writing and loading spca packages
-    sim2.write_simulation()
-    sim3 = MFSimulation.load(sim_ws=sim2.sim_path)
-    gwt3 = sim3.get_model("gwt_model_1")
-    spc1 = gwt3.get_package("gwt_model_1.rch1.spc")
-    assert isinstance(spc1, ModflowUtlspca)
-    assert spc1.print_input.get_data() is True
-    spc2 = gwt3.get_package("gwt_model_1.rch2.spc")
-    assert spc2.print_input.get_data() is not True
-
-    # create a new gwt model
-    sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")]
-    gwt_2 = get_gwt_model(
-        sim,
-        model_names[3],
-        model_names[3],
-        (nlay, nrow, ncol),
-        sourcerecarray=sourcerecarray,
-    )
-    # register gwt model with transport.ims
-    sim.register_ims_package(imsgwt, gwt_2.name)
-    # flow and transport exchange
-    gwfgwt = ModflowGwfgwt(
-        sim,
-        exgtype="GWF6-GWT6",
-        exgmnamea=model_names[1],
-        exgmnameb=model_names[3],
-        filename="flow2_transport2.gwfgwt",
-    )
-    # save and run updated model
-    sim.write_simulation()
-    sim.run_simulation()
-
-    with pytest.raises(
-        flopy.mf6.mfbase.FlopyException,
-        match='Extraneous kwargs "param_does_not_exist" provided to MFPackage.',
-    ):
-        # test kwargs error checking
-        wel = ModflowGwfwel(
-            gwf2,
-            print_input=True,
-            print_flows=True,
-            stress_period_data=welspd,
-            save_flows=False,
-            auxiliary="CONCENTRATION",
-            pname="WEL-1",
-            param_does_not_exist=True,
-        )
-
-
-@requires_exe("mf6")
-def test_namefile_creation(function_tmpdir):
-    test_ex_name = "test_namefile"
-    # build MODFLOW 6 files
-    sim = MFSimulation(
-        sim_name=test_ex_name,
-        version="mf6",
-        exe_name="mf6",
-        sim_ws=str(function_tmpdir),
-    )
-
-    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0), (6.0, 3, 1.0)]
-    tdis = ModflowTdis(sim, time_units="DAYS", nper=4, perioddata=tdis_rc)
-    ims_package = ModflowIms(
-        sim,
-        pname="my_ims_file",
-        filename=f"{test_ex_name}.ims",
-        print_option="ALL",
-        complexity="SIMPLE",
-        outer_dvclose=0.0001,
-        outer_maximum=50,
-        under_relaxation="NONE",
-        inner_maximum=30,
-        inner_dvclose=0.0001,
-        linear_acceleration="CG",
-        preconditioner_levels=7,
-        preconditioner_drop_tolerance=0.01,
-        number_orthogonalizations=2,
-    )
-    model = ModflowGwf(
-        sim,
-        modelname=test_ex_name,
-        model_nam_file=f"{test_ex_name}.nam",
-    )
-
-    # try to create simulation name file
-    ex_happened = False
-    try:
-        nam = ModflowNam(sim)
-    except flopy.mf6.mfbase.FlopyException:
-        ex_happened = True
-    assert ex_happened
-
-    # try to create model name file
-    ex_happened = False
-    try:
-        nam = ModflowGwfnam(model)
-    except flopy.mf6.mfbase.FlopyException:
-        ex_happened = True
-    assert ex_happened
-
-
-def test_remove_model(function_tmpdir, example_data_path):
-    # load a multi-model simulation
-    sim_ws = str(example_data_path / "mf6" / "test006_2models_mvr")
-    sim = MFSimulation.load(sim_ws=sim_ws, exe_name="mf6")
-
-    # original simulation should contain models:
-    # - 'parent', with files named 'model1.ext'
-    # - 'child', with files named 'model2.ext'
-    assert len(sim.model_names) == 2
-    assert "parent" in sim.model_names
-    assert "child" in sim.model_names
-
-    # remove the child model
-    sim.remove_model("child")
-
-    # simulation should now only contain the parent model
-    assert len(sim.model_names) == 1
-    assert "parent" in sim.model_names
-
-    # write simulation input files
-    sim.set_sim_path(function_tmpdir)
-    sim.write_simulation()
-
-    # there should be no input files for the child model
-    files = list(function_tmpdir.glob("*"))
-    assert not any("model2" in f.name for f in files)
-
-    # there should be no model or solver entry for the child model
-    # in the simulation namefile
-    lines = open(function_tmpdir / "mfsim.nam").readlines()
-    lines = [l.lower().strip() for l in lines]
-    assert not any("model2" in l for l in lines)
-    assert not any("child" in l for l in lines)
-
-    # there should be no exchanges either
-    exg_index = 0
-    for i, l in enumerate(lines):
-        if "begin exchanges" in l:
-            exg_index = i
-        elif exg_index > 0:
-            assert "end exchanges" in l
-            break
-
-
-@requires_pkg("shapely")
-@requires_exe("triangle")
-def test_flopy_2283(function_tmpdir):
-    # create triangular grid
-    triangle_ws = function_tmpdir / "triangle"
-    triangle_ws.mkdir()
-
-    active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)]
-    tri = Triangle(model_ws=triangle_ws, angle=30)
-    tri.add_polygon(active_area)
-    tri.add_region((1, 1), maximum_area=50**2)
-
-    tri.build()
-
-    # build vertex grid object
-    vgrid = flopy.discretization.VertexGrid(
-        vertices=tri.get_vertices(),
-        cell2d=tri.get_cell2d(),
-        xoff=199000,
-        yoff=215500,
-        crs=31370,
-        angrot=30,
-    )
-
-    # coord info is set (also correct when using vgrid.set_coord_info()
-    print(vgrid)
-
-    # create MODFLOW 6 model
-    ws = function_tmpdir / "model"
-    ws.mkdir()
-    sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws)
-    tdis = flopy.mf6.ModflowTdis(sim)
-    ims = flopy.mf6.ModflowIms(sim)
-
-    gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf")
-    disv = flopy.mf6.ModflowGwfdisv(
-        gwf,
-        xorigin=vgrid.xoffset,
-        yorigin=vgrid.yoffset,
-        angrot=vgrid.angrot,  # no CRS info can be set in DISV
-        nlay=1,
-        top=0.0,
-        botm=-10.0,
-        ncpl=vgrid.ncpl,
-        nvert=vgrid.nvert,
-        cell2d=vgrid.cell2d,
-        vertices=tri.get_vertices(),  # this is not stored in the Vertex grid object?
-    )
-
-    assert gwf.modelgrid.xoffset == disv.xorigin.get_data()
-    assert gwf.modelgrid.yoffset == disv.yorigin.get_data()
-    assert gwf.modelgrid.angrot == disv.angrot.get_data()
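A note on the idiom the grid-array tests in this series rely on: grid-based (readarraygrid) stress packages mark inactive cells with the MODFLOW 6 DNODATA constant instead of omitting them, so per-period totals must mask that marker before summing. A minimal, self-contained sketch of the pattern, assuming only numpy; the 4-period, 2x2 grid shape here is illustrative and not taken from the tests:

import numpy as np

DNODATA = 3.0e30  # MODFLOW 6 "no data" marker used by grid-based packages

# one grid per stress period; every cell inactive except one in period 1
q = np.full((4, 1, 2, 2), DNODATA, dtype=float)
q[1, 0, 0, 0] = 0.25

# swap the marker for NaN so nansum ignores inactive cells
q_nan = np.where(q == DNODATA, np.nan, q)
val_q = np.nansum(q_nan, axis=(1, 2, 3))  # one total per stress period
assert val_q[1] == 0.25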

From 9c13d515c3a249a3909f74c453b254477796a735 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Wed, 27 Aug 2025 08:45:13 -0400
Subject: [PATCH 29/44] add try/except

---
 autotest/test_mf6.py | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 42edfbff85..dfa7311a6d 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -39,7 +39,6 @@
     ModflowGwfsto,
     ModflowGwfuzf,
     ModflowGwfwel,
-    ModflowGwfwelg,
     ModflowGwtadv,
     ModflowGwtdis,
     ModflowGwtic,
@@ -1963,8 +1962,17 @@ def test_array(function_tmpdir):
 
 @requires_exe("mf6")
-@pytest.mark.slow
 def test_grid_array(function_tmpdir):
+    try:
+        from flopy.mf6 import (
+            ModflowGwfchdg,
+            ModflowGwfdrng,
+            ModflowGwfghbg,
+            ModflowGwfrivg,
+            ModflowGwfwelg,
+        )
+    except ImportError:
+        return
     # get_data
     # empty data in period block vs data repeating
     # array
     # aux values, test that they work the same as other arrays (is a value
     # of zero always used even if aux is defined in a previous stress
     # period?)
@@ -2388,14 +2396,25 @@ def test_grid_array(function_tmpdir):
 
     # TODO
     wel_q_array = wel.q.array
+    assert np.all(wel_q_array[0][0] == 0.0)
     assert wel_q_array[1][0][0, 0, 0] == 0.25
     assert wel_q_array[2][0][0, 0, 0] == 0.1
-    # assert wel_array[3][0][1] == 0.1
+    assert wel_q_array[3][0][0, 0, 0] == 0.1
     welg_q_per = wel.q.get_data()
     assert welg_q_per[0] is None
     assert welg_q_per[1][0, 0, 0] == 0.25
     assert welg_q_per[2][0, 0, 0] == 0.1
-    # assert welg_q_per[3][0, 0, 0] == 0.1
+    assert welg_q_per[3] is None
+    wel_aux_array = wel.aux.array
+    assert np.all(wel_aux_array[0][0] == 0.0)
+    assert wel_aux_array[1][0][0, 0, 0] == 0.0
+    assert wel_aux_array[2][0][0, 0, 0] == 0.0
+    assert wel_aux_array[3][0][0, 0, 0] == 0.0
+    welg_aux_per = wel.aux.get_data()
+    assert welg_aux_per[0] is None
+    assert welg_aux_per[1][0][0, 0, 0] == 0.0
+    assert welg_aux_per[2][0][0, 0, 0] == 0.0
+    assert welg_aux_per[3] is None
 
     welg_q_per1 = wel.q.get_data(1)
     # print(wel.q.array)

From f6c3d02bc7dbb768e1598083af7ba74e3e53a7b0 Mon Sep 17 00:00:00 2001
From: mjreno
Date: Wed, 27 Aug 2025 09:00:33 -0400
Subject: [PATCH 30/44] restore workflow

---
 .github/workflows/commit.yml | 5 +----
 flopy/mf6/mfpackage.py       | 6 ++----
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index e294c4cbb9..299b55e74d 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -166,6 +166,7 @@ jobs:
         working-directory: flopy
         run: |
           pixi run --manifest-path=../modflow6/pixi.toml pip install --no-deps -e .
+          pixi run --manifest-path=../modflow6/pixi.toml python -m flopy.mf6.utils.generate_classes --dfnpath ../modflow6/doc/mf6io/mf6ivar/dfn
 
       - name: Build MF6
         working-directory: modflow6
@@ -174,10 +175,6 @@
           pixi run meson install -C builddir
           pixi run meson test --verbose --no-rebuild -C builddir
 
-      - name: Update FloPy packages
-        working-directory: modflow6
-        run: python -m flopy.mf6.utils.generate_classes --dfnpath doc/mf6io/mf6ivar/dfn
-
       - name: Run tests
         working-directory: flopy/autotest
         run: |
diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py
index c0b6db12f2..9b9f57f7ff 100644
--- a/flopy/mf6/mfpackage.py
+++ b/flopy/mf6/mfpackage.py
@@ -3678,8 +3678,7 @@ def _update_data(nc_info, key, dobj=None, data=None):
         if mesh == None:
             if dobj.repeating:
                 if iaux >= 0:
-                    auxkeys = list(data)
-                    for k in auxkeys:
+                    for k in data:
                         if data[k] is None:
                             continue
                         istp = sum(modeltime.nstp[0:k])
@@ -3708,8 +3707,7 @@ def _update_data(nc_info, key, dobj=None, data=None):
             layer = -1
             if dobj.repeating:
                 if iaux >= 0:
-                    auxkeys = list(data)
-                    for k in auxkeys:
+                    for k in data:
                         if data[k] is None:
                             continue
                         auxdata = data[k][iaux]

From 6c84910fc3f862e27ee68dd994cba3c868c491fb Mon Sep 17 00:00:00 2001
From: mjreno
Date: Mon, 1 Sep 2025 12:22:29 -0400
Subject: [PATCH 31/44] refactor readarraygrid aux variables to store layered

---
 autotest/test_mf6.py            | 143 ++++++++++++++++---------
 flopy/mf6/data/mfdataarray.py   | 180 ++++++++++++++++++++------------
 flopy/mf6/data/mfdatastorage.py | 113 ++++++++++----------
 flopy/mf6/data/mffileaccess.py  |  31 +++++-
 flopy/mf6/mfpackage.py          |  27 +++--
 5 files changed, 309 insertions(+), 185 deletions(-)

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index dfa7311a6d..64057ac703 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -1963,6 +1963,8 @@ def test_array(function_tmpdir):
 
 @requires_exe("mf6")
 def test_grid_array(function_tmpdir):
+    import warnings
+
     try:
         from flopy.mf6 import (
             ModflowGwfchdg,
@@ -1972,6 +1974,8 @@ def test_grid_array(function_tmpdir):
             ModflowGwfwelg,
         )
     except ImportError:
+        msg = "test_mf6 test_grid_array did not run"
+        warnings.warn(msg, UserWarning)
         return
     # get_data
     # empty data in period block vs data repeating
     # array
     # aux values, test that they work the same as other arrays (is a value
     # of zero always used even if aux is defined in a previous stress
     # period?)
     sim_name = "test_grid_array"
     model_name = "test_grid_array"
     out_dir = function_tmpdir
@@ -2015,7 +2019,7 @@ def test_grid_array(function_tmpdir):
         delc=5000.0,
         top=100.0,
         botm=[50.0, 0.0, -50.0, -100.0],
-        filename=f"{model_name} 1.dis",
+        filename=f"{model_name}.dis",
     )
     ic_package = ModflowGwfic(model, strt=90.0, filename=f"{model_name}.ic")
     npf_package = ModflowGwfnpf(
@@ -2091,6 +2095,8 @@ def test_grid_array(function_tmpdir):
         q = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
         welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
         welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float)
+        # TODO constant by layer
+        # welaux2 = [DNODATA]
         if n == 1:
             q[0, 0, 0] = 0.25
             welconc[0, 0, 0] = 0.0
             welaux2[0, 0, 0] = 9.0
@@ -2115,6 +2121,12 @@ def test_grid_array(function_tmpdir):
         aux=welconcspd,
     )
 
+    print(wel.aux.array)
+    print(wel.aux.get_data())
+
+    # sim.write_simulation()
+    # assert False
+
     assert len(wel.q.array) == 4
     assert len(wel.q.get_data()) == 4
     assert len(wel.aux.array) == 4
@@ -2135,15 +2147,6 @@ def test_grid_array(function_tmpdir):
     assert np.allclose(wel.aux.array[2][1], wel.aux.get_data()[2][1])
     assert np.allclose(wel.aux.array[3][0], wel.aux.get_data()[3][0])
     assert np.allclose(wel.aux.array[3][1], wel.aux.get_data()[3][1])
-    # assert wel.q.get_data()[0] is None
-    # assert wel.q.get_data(0) is None
-    # assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1))
-    # assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2))
+2404,7 @@ def test_grid_array(function_tmpdir): drn = model.get_package("drn") lak = model.get_package("lak") lak_tab = model.get_package("laktab") - assert os.path.split(dis.filename)[1] == f"{model_name} 1.dis" + assert os.path.split(dis.filename)[1] == f"{model_name}.dis" # do same tests as above val_irch = rcha.irch.array.sum(axis=(1, 2, 3)) assert val_irch[0] == 4 @@ -2406,6 +2447,8 @@ def test_grid_array(function_tmpdir): assert welg_q_per[2][0, 0, 0] == 0.1 assert welg_q_per[3] is None wel_aux_array = wel.aux.array + print(wel_aux_array) + # assert False assert np.all(wel_aux_array[0][0] == 0.0) assert wel_aux_array[1][0][0, 0, 0] == 0.0 assert wel_aux_array[2][0][0, 0, 0] == 0.0 @@ -2422,19 +2465,19 @@ def test_grid_array(function_tmpdir): welg_aux_per1 = wel.aux.get_data(1) assert welg_aux_per1[0][0, 0, 0] == 0.0 - drn_array = drn.stress_period_data.array - assert drn_array[0][0][1] == 60.0 - assert drn_array[1][0][1] == 60.0 - assert drn_array[2] is None - assert drn_array[3][0][1] == 55.0 - drn_gd_0 = drn.stress_period_data.get_data(0) - assert drn_gd_0[0][1] == 60.0 - drn_gd_1 = drn.stress_period_data.get_data(1) - assert drn_gd_1 is None - drn_gd_2 = drn.stress_period_data.get_data(2) - assert len(drn_gd_2) == 0 - drn_gd_3 = drn.stress_period_data.get_data(3) - assert drn_gd_3[0][1] == 55.0 + # drn_array = drn.stress_period_data.array + # assert drn_array[0][0][1] == 60.0 + # assert drn_array[1][0][1] == 60.0 + # assert drn_array[2] is None + # assert drn_array[3][0][1] == 55.0 + # drn_gd_0 = drn.stress_period_data.get_data(0) + # assert drn_gd_0[0][1] == 60.0 + # drn_gd_1 = drn.stress_period_data.get_data(1) + # assert drn_gd_1 is None + # drn_gd_2 = drn.stress_period_data.get_data(2) + # assert len(drn_gd_2) == 0 + # drn_gd_3 = drn.stress_period_data.get_data(3) + # assert drn_gd_3[0][1] == 55.0 lak_tab_array = lak.tables.get_data() assert lak_tab_array[0][1] == "lak01.tab" diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index a51df58e49..e4b2f5fe04 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -735,7 +735,6 @@ def _get_data(self, layer=None, apply_mult=False, **kwargs): and isinstance(self, MFTransientArray) and data is not [] # noqa: F632 and not self._is_grid_aux() - and not self._is_layered_aux() ): data = np.expand_dims(data, 0) return data @@ -921,6 +920,38 @@ def _set_data( return storage = self._get_storage_obj() + + def _aux_storage_set(layer_storage, layer_storage_data): + if ( + layer > 0 + and layer >= storage.layer_storage.get_total_size() + ): + storage.add_layer() + try: + storage.set_data( + layer_storage_data, + [layer_storage], + multiplier, + self._current_key, + preserve_record=preserve_record, + ) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + "setting data", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + None, + self._simulation_data.debug, + ex, + ) + if self.structure.name == "aux" and layer is None: if isinstance(data, dict): aux_data = copy.deepcopy(data["data"]) @@ -938,48 +969,48 @@ def _set_data( aux_var_names = ( self.data_dimensions.package_dim.get_aux_variables() ) - if len(aux_data) == len(aux_var_names[0]) - 1: - for layer, aux_var_data in enumerate(aux_data): - if ( - layer > 0 - and layer >= storage.layer_storage.get_total_size() - ): - storage.add_layer() - if isinstance(data, dict): - # put layer data back in 
dictionary - layer_data = data - layer_data["data"] = aux_var_data - else: - layer_data = aux_var_data - try: - storage.set_data( - layer_data, - [layer], - multiplier, - self._current_key, - preserve_record=preserve_record, - ) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - "setting data", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - None, - self._simulation_data.debug, - ex, - ) + if len(aux_data) == (len(aux_var_names[0]) - 1): + if self._is_grid_aux(): + modelgrid = self.data_dimensions.get_model_grid() + nlayer = modelgrid.num_layers() + + for iaux, grid_aux in enumerate(aux_data): + if nlayer == 1: + layer = iaux + if isinstance(data, dict): + # put layer data back in dictionary + layer_data = data + layer_data["data"] = grid_aux + else: + layer_data = grid_aux + + _aux_storage_set(layer, layer_data) + else: + for ilayer, aux_layer_data in enumerate(grid_aux): + layer = iaux * nlayer + ilayer + if isinstance(data, dict): + # put layer data back in dictionary + layer_data = data + layer_data["data"] = aux_layer_data + else: + layer_data = aux_layer_data + + _aux_storage_set(layer, layer_data) + else: + for layer, aux_var_data in enumerate(aux_data): + if isinstance(data, dict): + # put layer data back in dictionary + layer_data = data + layer_data["data"] = aux_var_data + else: + layer_data = aux_var_data + + _aux_storage_set(layer, layer_data) else: message = ( "Unable to set data for aux variable. " "Expected {} aux variables but got " - "{}.".format(len(aux_var_names[0]), len(data)) + "{}.".format(len(aux_var_names[0])-1, len(data)) ) type_, value_, traceback_ = sys.exc_info() raise MFDataException( @@ -1090,6 +1121,11 @@ def load( if self._layer_shape[-1] != model_grid.num_layers(): if model_grid.grid_type() == DiscretizationType.DISU: self._layer_shape = (1,) + #elif self._is_grid_aux(): + # self._layer_shape = ( + # model_grid.num_layers() * + # (len(self.data_dimensions.package_dim.get_aux_variables()[0]) - 1) + # ) else: self._layer_shape = (model_grid.num_layers(),) if self._layer_shape[-1] is None: @@ -1098,6 +1134,7 @@ def load( self._set_storage_obj( self._new_storage(shape_ml.get_total_size() != 1, True) ) + storage = self._get_storage_obj() if external_file_info is not None: storage.point_to_existing_external_file( @@ -1127,6 +1164,7 @@ def _is_layered_aux(self): # determine if this is the special aux variable case if ( self.structure.name.lower() == "aux" + #and not self.structure.layered and self._get_storage_obj().layered ): return True @@ -1177,6 +1215,7 @@ def _get_file_entry( return "" layered_aux = self._is_layered_aux() + grid_aux = self._is_grid_aux() # prepare indent indent = self._simulation_data.indent_string @@ -1254,7 +1293,34 @@ def _get_file_entry( layer_min = layer layer_max = shape_ml.inc_shape_idx(layer) - if layered_aux: + if grid_aux: + modelgrid = self.data_dimensions.get_model_grid() + nlayer = modelgrid.num_layers() + aux_var_names = ( + self.data_dimensions.package_dim.get_aux_variables()[0] + ) + for iaux in range(0, len(aux_var_names)-1): + auxname = aux_var_names[iaux + 1] + if data_storage.netcdf: + if data_storage.has_data(): + file_entry_array.append(f"{indent}{auxname}{indent}NETCDF\n") + else: + if nlayer > 1: + file_entry_array.append(f"{indent}{auxname}{indent}LAYERED\n") + else: + file_entry_array.append(f"{indent}{auxname}\n") + for ilayer in range(nlayer): + sto_layer = 
iaux * nlayer + ilayer + file_entry_array.append( + self._get_file_entry_layer( + (sto_layer,), + data_indent, + data_storage.layer_storage[sto_layer].data_storage_type, + ext_file_action, + ) + ) + + elif layered_aux: aux_var_names = ( self.data_dimensions.package_dim.get_aux_variables()[0] ) @@ -1272,6 +1338,7 @@ def _get_file_entry( layered_aux, ) ) + elif data_storage.netcdf: file_entry_array.append(f"{indent}{self.structure.name}{indent}NETCDF\n") @@ -1481,12 +1548,7 @@ def _get_data_layer_string(self, layer, data_indent): self._path, self._current_key, ) - if self._is_grid_aux(): - return file_access.get_data_string( - [a.ravel().tolist() for a in data], self._data_type, data_indent - ) - else: - return file_access.get_data_string(data, self._data_type, data_indent) + return file_access.get_data_string(data, self._data_type, data_indent) def _resolve_layer_index(self, layer, allow_multiple_layers=False): # handle layered vs non-layered data @@ -1818,8 +1880,7 @@ def _get_array(self, num_sp, apply_mult, **kwargs): if sp in self._data_storage: self.get_data_prep(sp) data = super().get_data(apply_mult=apply_mult, **kwargs) - if not (self._is_grid_aux() or self._is_layered_aux()): - data = np.expand_dims(data, 0) + data = np.expand_dims(data, 0) else: # if there is no previous data provide array of # zeros, otherwise provide last array of data found @@ -1837,29 +1898,18 @@ def _get_array(self, num_sp, apply_mult, **kwargs): data = np.full_like(data, 1) else: data = np.full_like(data, 0.0) - if not (self._is_grid_aux() or self._is_layered_aux()): - data = np.expand_dims(data, 0) + data = np.expand_dims(data, 0) if output is None or data is None: output = data - if data is not None: - baseshape = np.shape(data) - if self._is_grid_aux(): - output = np.expand_dims(output, 0) else: - if np.all(output == None): - baseshape = np.shape(data) - output = np.full(np.shape(data), np.nan, self.dtype) - output = np.concatenate((output, data)) - if self._is_grid_aux(): - output = np.expand_dims(output, 0) - elif np.all(data == None): - anone = np.full(baseshape, np.nan, self.dtype) + if np.all(data == None): + anone = np.full(baseshape, np.nan, np.float64) output = np.append(output, anone, axis=0) else: - if self._is_grid_aux() and np.shape(data) == baseshape: - data = np.expand_dims(data, 0) + if np.all(output == None): + baseshape = np.shape(data) + output = np.full(baseshape, np.nan, self.dtype) output = np.concatenate((output, data)) - return output def has_data(self, layer=None): diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 3c47eabd21..72274dbb64 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -1017,6 +1017,14 @@ def _set_array( ): data = [data] + grid_aux = ( + self.data_dimensions.structure.name == "aux" + and self.data_dimensions.structure.layered + ) + if grid_aux: + model_grid = self.data_dimensions.get_model_grid() + nlay = model_grid.num_layers() + success = False if preserve_record: if isinstance(data, np.ndarray): @@ -1037,14 +1045,24 @@ def _set_array( elif isinstance(data, dict): first_key = list(data.keys())[0] if isinstance(first_key, int): - for layer_num, data_layer in data.items(): - success = self._set_array_layer( - data_layer, - layer_num, - multiplier, - key, - preserve_record, - ) + if grid_aux: + for l in range(nlay): + success = self._set_array_layer( + data[l], + layer, + multiplier, + key, + preserve_record, + ) + else: + for layer_num, data_layer in data.items(): + success = 
self._set_array_layer( + data_layer, + layer_num, + multiplier, + key, + preserve_record, + ) if not success: # storing while preserving the record failed, try storing as a @@ -2355,6 +2373,21 @@ def _build_full_data(self, apply_multiplier=False): if self.data_structure_type == DataStructureType.scalar: return self.layer_storage.first_item().internal_data dimensions = self.get_data_dimensions(None) + layer_aux = ( + self.data_dimensions.structure.name == "aux" + and not self.data_dimensions.structure.layered + ) + grid_aux = ( + self.data_dimensions.structure.name == "aux" + and self.data_dimensions.structure.layered + ) + if grid_aux: + model_grid = self.data_dimensions.get_model_grid() + nlay = model_grid.num_layers() + package_dim = self.data_dimensions.package_dim + naux = len(package_dim.get_aux_variables()[0]) - 1 + if len(dimensions) <= 3 and dimensions[0] == nlay: + dimensions.insert(0, naux) if dimensions[0] < 0: # dimensions can not be determined from dfn file, use # the size of the data provided as the dimensions @@ -2371,25 +2404,14 @@ def _build_full_data(self, apply_multiplier=False): else: fill_value = None full_data = np.full(dimensions, fill_value, np_full_data_type) - layer_aux = ( - self.data_dimensions.structure.name == "aux" - and not self.data_dimensions.structure.layered - ) - grid_aux = ( - self.data_dimensions.structure.name == "aux" - and self.data_dimensions.structure.layered - ) - if layer_aux or grid_aux: + is_aux = self.data_dimensions.structure.name == "aux" + if is_aux: aux_data = [] if not self.layered: layers_to_process = [0] - elif grid_aux: - layers_to_process = [] - auxvar = self.data_dimensions.package_dim.get_aux_variables()[0] - for i in range(len(auxvar) - 1): - layers_to_process.append(i) else: layers_to_process = self.layer_storage.indexes() + layers_to_process = self.layer_storage.indexes() for layer in layers_to_process: if ( self.layer_storage[layer].factor is not None @@ -2410,7 +2432,7 @@ def _build_full_data(self, apply_multiplier=False): or len(self.layer_storage[layer].internal_data) > 0 and self.layer_storage[layer].internal_data[0] is None ): - if layer_aux: + if is_aux: full_data = None else: return None @@ -2420,16 +2442,16 @@ def _build_full_data(self, apply_multiplier=False): or not self._has_layer_dim() ): full_data = self.layer_storage[layer].internal_data * mult + elif grid_aux: + ilayer = (layer[0]) % nlay + iaux = int((layer[0]) / nlay) + full_data[iaux][ilayer] = ( + self.layer_storage[layer].internal_data * mult + ) else: - if grid_aux: - full_data = ( - self.layer_storage[layer].internal_data * mult - ) - aux_data.append(full_data) - else: - full_data[layer] = ( - self.layer_storage[layer].internal_data * mult - ) + full_data[layer] = ( + self.layer_storage[layer].internal_data * mult + ) elif ( self.layer_storage[layer].data_storage_type == DataStorageType.internal_constant @@ -2440,9 +2462,6 @@ def _build_full_data(self, apply_multiplier=False): or not self._has_layer_dim() ): full_data = self._fill_const_layer(layer) * mult - elif grid_aux: - full_data = self._fill_const_grid(layer) * mult - aux_data.append(full_data) else: full_data[layer] = self._fill_const_layer(layer) * mult else: @@ -2490,7 +2509,7 @@ def _build_full_data(self, apply_multiplier=False): ): full_data = data_out else: - if layer_aux and full_data.shape == data_out.shape: + if is_aux and full_data.shape == data_out.shape: full_data = data_out else: full_data[layer] = data_out @@ -2508,11 +2527,6 @@ def _build_full_data(self, apply_multiplier=False): 
return None else: return np.stack(aux_data, axis=0) - elif grid_aux: - if len(aux_data) == 0: - return [full_data] - else: - return aux_data else: return full_data @@ -2541,18 +2555,6 @@ def _fill_const_layer(self, layer): ) return np.full(data_dimensions, ls.data_const_value[0], data_type) - def _fill_const_grid(self, layer): - data_dimensions = self.get_data_dimensions(None) - #ls = self.layer_storage.first_item() - ls = self.layer_storage[layer] - if data_dimensions[0] < 0: - return ls.data_const_value[0] - else: - data_type = self.data_dimensions.structure.get_datum_type( - numpy_type=True - ) - return np.full(data_dimensions, ls.data_const_value[0], data_type) - def _is_type(self, data_item, data_type): if data_type == DatumType.string or data_type == DatumType.keyword: return True @@ -3038,15 +3040,10 @@ def _get_max_min_data_line_size(data): def get_data_dimensions(self, layer): data_dimensions = self.data_dimensions.get_data_shape()[0] - grid_aux = ( - self.data_dimensions.structure.name == "aux" - and self.data_dimensions.structure.layered - ) if ( layer is not None and self.layer_storage.get_total_size() > 1 and self._has_layer_dim() - and not grid_aux ): # remove all "layer" dimensions from the list layer_dims = self.data_dimensions.structure.data_item_structures[ diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index c4429a82a4..5e6f885865 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -742,6 +742,10 @@ def load_from_package( else: index_num = 0 aux_var_index = None + grid_aux = ( + self._data_dimensions.structure.name == "aux" + and self._data_dimensions.structure.layered + ) # TODO: Add species support # if layered supported, look for layered flag @@ -749,6 +753,7 @@ def load_from_package( if ( len(arr_line) > index_num and arr_line[index_num].lower() == "layered" + and not grid_aux ): storage.layered = True try: @@ -771,6 +776,19 @@ def load_from_package( ) if len(layers) > 0: storage.init_layers(layers) + + elif grid_aux: + #if layer_shape[0] > 1: + storage.layered = True + auxidx = self._get_aux_var_index(arr_line[0]) + if auxidx == 0: + layers = ( + layer_shape[0] * + (len(self._data_dimensions.package_dim.get_aux_variables()[0]) - 1) + ) + while layers > storage.layer_storage.get_total_size(): + storage.add_layer() + elif aux_var_index is not None: # each layer stores a different aux variable layers = len(package_dim.get_aux_variables()[0]) - 1 @@ -778,6 +796,7 @@ def load_from_package( storage.layered = True while storage.layer_storage.list_shape[0] < layers: storage.add_layer() + else: storage.flatten() try: @@ -805,7 +824,17 @@ def load_from_package( for dimension in dimensions: layer_size *= dimension - if aux_var_index is None: + if grid_aux: + for l in range(layer_shape[0]): + self._load_layer( + (layer_shape[0] * auxidx + l,), + layer_size, + storage, + arr_line, + file_handle, + layer_shape, + ) + elif aux_var_index is None: # loop through the number of layers for layer in storage.layer_storage.indexes(): self._load_layer( diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 9b9f57f7ff..7476b96cd5 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -856,6 +856,7 @@ def load(self, block_header, fd, strict=True): # handle special readasarrays case if ( self._container_package.structure.read_as_arrays + or self._container_package.structure.read_array_grid or ( hasattr(self._container_package, "aux") and self._container_package.aux.structure.layered @@ -3670,7 +3671,6 @@ 
def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True) dimmap["x"] = modelgrid.ncol def _update_data(nc_info, key, dobj=None, data=None): - from types import MappingProxyType if "modflow_iaux" in nc_info[key]["attrs"]: iaux = nc_info[key]["attrs"]["modflow_iaux"] - 1 else: @@ -3678,11 +3678,11 @@ def _update_data(nc_info, key, dobj=None, data=None): if mesh == None: if dobj.repeating: if iaux >= 0: - for k in data: - if data[k] is None: + for per in data: + if data[per] is None: continue - istp = sum(modeltime.nstp[0:k]) - auxdata = data[k][iaux] + istp = sum(modeltime.nstp[0:per]) + auxdata = data[per][iaux] dataset[nc_info[key]["varname"]].values[istp, :] = ( auxdata) else: @@ -3707,17 +3707,22 @@ def _update_data(nc_info, key, dobj=None, data=None): layer = -1 if dobj.repeating: if iaux >= 0: - for k in data: - if data[k] is None: + for per in data: + if data[per] is None: continue - auxdata = data[k][iaux] - istp = sum(modeltime.nstp[0:k]) + auxdata = data[per][iaux] + istp = sum(modeltime.nstp[0:per]) if self.structure.read_as_arrays: dataset[nc_info[key]["varname"]].values[istp, :] = ( auxdata.flatten()) elif self.structure.read_array_grid: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - auxdata[layer].flatten()) + uidx = istp + auxdata[layer].size + if modelgrid.nlay > 1: + dataset[nc_info[key]["varname"]].values[istp, :] = ( + auxdata[layer].flatten()) + else: + dataset[nc_info[key]["varname"]].values[istp, :] = ( + auxdata.flatten()) else: for per in data: if data[per] is None: From d9c60d1266433c984dd75079773a0b27155e472b Mon Sep 17 00:00:00 2001 From: mjreno Date: Wed, 3 Sep 2025 07:54:35 -0400 Subject: [PATCH 32/44] test_grid_array mf6 test --- autotest/test_mf6.py | 155 ++++++++++++++------------------ flopy/mf6/data/mfdatastorage.py | 4 + flopy/mf6/data/mffileaccess.py | 1 - 3 files changed, 70 insertions(+), 90 deletions(-) diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py index 64057ac703..b7cb43474b 100644 --- a/autotest/test_mf6.py +++ b/autotest/test_mf6.py @@ -1977,12 +1977,6 @@ def test_grid_array(function_tmpdir): msg = "test_mf6 test_grid_array did not run" warnings.warn(msg, UserWarning) return - # get_data - # empty data in period block vs data repeating - # array - # aux values, test that they work the same as other arrays (is a value - # of zero always used even if aux is defined in a previous stress - # period?) 
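The refactor above stores each (aux variable, layer) pair of a readarraygrid package in its own layer-storage slot; mfdataarray.py computes the slot as iaux * nlayer + ilayer when writing file entries, and mfdatastorage.py inverts it with floor division and modulo when rebuilding full arrays. A standalone sketch of that mapping and its round trip, assuming illustrative nlay and naux values:

nlay, naux = 4, 2  # assumed grid layer and aux variable counts

def to_slot(iaux, ilayer):
    # flat slot index used when writing layered aux data (_get_file_entry)
    return iaux * nlay + ilayer

def from_slot(slot):
    # inverse used when rebuilding the (naux, nlay, ...) array (_build_full_data)
    return slot // nlay, slot % nlay

assert all(
    from_slot(to_slot(iaux, il)) == (iaux, il)
    for iaux in range(naux)
    for il in range(nlay)
)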
sim_name = "test_grid_array" model_name = "test_grid_array" @@ -2089,24 +2083,27 @@ def test_grid_array(function_tmpdir): ncol = dis.ncol.get_data() DNODATA = 3.0e30 # MF6 DNODATA constant + nper = 4 welqspd = {} welconcspd = {} for n in range(4): q = np.full((nlay, nrow, ncol), DNODATA, dtype=float) welconc = np.full((nlay, nrow, ncol), DNODATA, dtype=float) welaux2 = np.full((nlay, nrow, ncol), DNODATA, dtype=float) - # TODO constant by layer - # welaux2 = [DNODATA] + # welcaux2 = np.full((nlay), DNODATA, dtype=float) if n == 1: q[0, 0, 0] = 0.25 welconc[0, 0, 0] = 0.0 welaux2[0, 0, 0] = 9.0 + # welcaux2[0] = 9.0 elif n == 2: q[0, 0, 0] = 0.1 welconc[0, 0, 0] = 9.0 welaux2[0, 0, 0] = 0.0 + # welcaux2[0] = 0.0 welqspd[n] = q welconcspd[n] = [welconc, welaux2] + # welconcspd[n] = [welconc, welcaux2] # first create test package with multiple auxvars wel = ModflowGwfwelg( @@ -2121,32 +2118,16 @@ def test_grid_array(function_tmpdir): aux=welconcspd, ) - print(wel.aux.array) - print(wel.aux.get_data()) - # sim.write_simulation() - # assert False assert len(wel.q.array) == 4 assert len(wel.q.get_data()) == 4 assert len(wel.aux.array) == 4 assert len(wel.aux.get_data()) == 4 - assert np.allclose(wel.aux.array[0][0], wel.aux.get_data(0)[0]) - assert np.allclose(wel.aux.array[0][1], wel.aux.get_data(0)[1]) - assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0]) - assert np.allclose(wel.aux.array[1][1], wel.aux.get_data(1)[1]) - assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0]) - assert np.allclose(wel.aux.array[2][1], wel.aux.get_data(2)[1]) - assert np.allclose(wel.aux.array[3][0], wel.aux.get_data(3)[0]) - assert np.allclose(wel.aux.array[3][1], wel.aux.get_data(3)[1]) - assert np.allclose(wel.aux.array[0][0], wel.aux.get_data()[0][0]) - assert np.allclose(wel.aux.array[0][1], wel.aux.get_data()[0][1]) - assert np.allclose(wel.aux.array[1][0], wel.aux.get_data()[1][0]) - assert np.allclose(wel.aux.array[1][1], wel.aux.get_data()[1][1]) - assert np.allclose(wel.aux.array[2][0], wel.aux.get_data()[2][0]) - assert np.allclose(wel.aux.array[2][1], wel.aux.get_data()[2][1]) - assert np.allclose(wel.aux.array[3][0], wel.aux.get_data()[3][0]) - assert np.allclose(wel.aux.array[3][1], wel.aux.get_data()[3][1]) + for p in range(nper): + for iaux in range(2): # naux + assert np.allclose(wel.aux.array[p][iaux], wel.aux.get_data(p)[iaux]) + assert np.allclose(wel.aux.array[p][iaux], wel.aux.get_data()[p][iaux]) assert not wel.has_stress_period_data q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array) @@ -2173,7 +2154,6 @@ def test_grid_array(function_tmpdir): # remove test wel package sim.write_simulation() - # assert False wel.remove() welqspd = {} @@ -2240,20 +2220,6 @@ def test_grid_array(function_tmpdir): aux_data_3 = wel.aux.get_data(3) assert aux_data_3 is None - # drnspdict = { - # 0: [[(0, 0, 0), 60.0, 10.0]], - # 2: [], - # 3: [[(0, 0, 0), 55.0, 5.0]], - # } - # drn = ModflowGwfdrn( - # model, - # print_input=True, - # print_flows=True, - # stress_period_data=drnspdict, - # save_flows=False, - # pname="DRN-1", - # ) - drnelevspd = {} drncondspd = {} for n in range(4): @@ -2290,32 +2256,47 @@ def test_grid_array(function_tmpdir): assert len(drn_cond_data) == 4 assert np.allclose(drn_elev_array[0], drn_elev_data[0]) assert np.allclose(drn_elev_array[0], drn.elev.get_data(0)) - - # drn_array = drn.stress_period_data.array - # assert drn_array[0][0][1] == 60.0 - # assert drn_array[1][0][1] == 60.0 - # assert drn_array[2] is None - # assert drn_array[3][0][1] == 55.0 - # 
drn_gd_0 = drn.stress_period_data.get_data(0) - # assert drn_gd_0[0][1] == 60.0 - # drn_gd_1 = drn.stress_period_data.get_data(1) - # assert drn_gd_1 is None - # drn_gd_2 = drn.stress_period_data.get_data(2) - # assert len(drn_gd_2) == 0 - # drn_gd_3 = drn.stress_period_data.get_data(3) - # assert drn_gd_3[0][1] == 55.0 - - ghbspdict = { - 0: [[(0, 1, 1), 60.0, 10.0]], - } - ghb = ModflowGwfghb( + for p in range(nper): + if p == 1: + assert drn_elev_data[p] is None + assert drn.elev.get_data(p) is None + else: + assert np.allclose(drn_elev_array[p], drn_elev_data[p]) + assert np.allclose(drn_elev_array[p], drn.elev.get_data()[p]) + assert np.allclose(drn_elev_array[p], drn.elev.get_data(p)) + + ghbbheadspd = {} + ghbcondspd = {} + bhead = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + cond = np.full((nlay, nrow, ncol), DNODATA, dtype=float) + bhead[0, 1, 1] = 60.0 + cond[0, 1, 1] = 10.0 + ghbbheadspd[0] = bhead + ghbcondspd[0] = cond + + ghb = ModflowGwfghbg( model, print_input=True, print_flows=True, - stress_period_data=ghbspdict, save_flows=False, pname="GHB-1", - ) + bhead=ghbbheadspd, + cond=ghbcondspd, + ) + + ghb_bhead_array = ghb.bhead.array + assert np.allclose(ghb.bhead.array[0], ghb.bhead.get_data()[0]) + assert np.allclose(ghb.bhead.array[0], ghb.bhead.get_data(0)) + assert np.allclose(ghb.cond.array[0], ghb.cond.get_data()[0]) + assert np.allclose(ghb.cond.array[0], ghb.cond.get_data(0)) + for p in range(1, nper): + assert ghb.bhead.get_data()[p] is None + assert ghb.bhead.get_data(p) is None + assert ghb.cond.get_data()[p] is None + assert ghb.cond.get_data(p) is None + assert len(ghb_bhead_array[p]) == len(ghb_bhead_array[0]) + assert ghb_bhead_array[p].shape == ghb_bhead_array[0].shape + assert np.allclose(ghb_bhead_array[p], ghb_bhead_array[0]) lakpd = [(0, 70.0, 1), (1, 65.0, 1)] lakecn = [ @@ -2384,11 +2365,8 @@ def test_grid_array(function_tmpdir): ) # test writing and loading model - # print(wel.aux.array) sim.write_simulation() - # print(wel.aux.array) sim.run_simulation() - # print(wel.aux.array) test_sim = MFSimulation.load( sim_name, @@ -2402,6 +2380,7 @@ def test_grid_array(function_tmpdir): rcha = model.get_package("rcha") wel = model.get_package("wel") drn = model.get_package("drn") + ghb = model.get_package("ghb") lak = model.get_package("lak") lak_tab = model.get_package("laktab") assert os.path.split(dis.filename)[1] == f"{model_name}.dis" @@ -2435,7 +2414,6 @@ def test_grid_array(function_tmpdir): aux_data_3 = rcha.aux.get_data(3) assert aux_data_3[0][0][0] == 200.0 - # TODO wel_q_array = wel.q.array assert np.all(wel_q_array[0][0] == 0.0) assert wel_q_array[1][0][0, 0, 0] == 0.25 @@ -2447,8 +2425,6 @@ def test_grid_array(function_tmpdir): assert welg_q_per[2][0, 0, 0] == 0.1 assert welg_q_per[3] is None wel_aux_array = wel.aux.array - print(wel_aux_array) - # assert False assert np.all(wel_aux_array[0][0] == 0.0) assert wel_aux_array[1][0][0, 0, 0] == 0.0 assert wel_aux_array[2][0][0, 0, 0] == 0.0 @@ -2459,25 +2435,26 @@ def test_grid_array(function_tmpdir): assert welg_aux_per[2][0][0, 0, 0] == 0.0 assert welg_aux_per[3] is None - welg_q_per1 = wel.q.get_data(1) - # print(wel.q.array) - assert welg_q_per1[0, 0, 0] == 0.25 - welg_aux_per1 = wel.aux.get_data(1) - assert welg_aux_per1[0][0, 0, 0] == 0.0 - - # drn_array = drn.stress_period_data.array - # assert drn_array[0][0][1] == 60.0 - # assert drn_array[1][0][1] == 60.0 - # assert drn_array[2] is None - # assert drn_array[3][0][1] == 55.0 - # drn_gd_0 = drn.stress_period_data.get_data(0) - # assert 
drn_gd_0[0][1] == 60.0 - # drn_gd_1 = drn.stress_period_data.get_data(1) - # assert drn_gd_1 is None - # drn_gd_2 = drn.stress_period_data.get_data(2) - # assert len(drn_gd_2) == 0 - # drn_gd_3 = drn.stress_period_data.get_data(3) - # assert drn_gd_3[0][1] == 55.0 + # first axis is nper, second is naux, then grid + drn_elev_array = drn.elev.array + assert drn_elev_array[0][0][0, 0, 0] == 60.0 + assert drn_elev_array[1][0][0, 0, 0] == 60.0 + assert drn_elev_array[2][0][0, 0, 0] == DNODATA + assert drn_elev_array[3][0][0, 0, 0] == 55.0 + assert np.allclose(drn_elev_array[0], drn.elev.get_data(0)) + assert drn.elev.get_data(1) is None + assert np.allclose(drn_elev_array[2], drn.elev.get_data(2)) + assert np.allclose(drn_elev_array[3], drn.elev.get_data(3)) + + ghb_bhead_array = ghb.bhead.array + assert ghb_bhead_array[0][0][0, 1, 1] == 60.0 + assert ghb_bhead_array[1][0][0, 1, 1] == 60.0 + assert ghb_bhead_array[2][0][0, 1, 1] == 60.0 + assert ghb_bhead_array[3][0][0, 1, 1] == 60.0 + assert np.allclose(ghb_bhead_array[0], ghb.bhead.get_data(0)) + assert ghb.bhead.get_data(1) is None + assert ghb.bhead.get_data(2) is None + assert ghb.bhead.get_data(3) is None lak_tab_array = lak.tables.get_data() assert lak_tab_array[0][1] == "lak01.tab" diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 72274dbb64..477530b7bc 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2462,6 +2462,10 @@ def _build_full_data(self, apply_multiplier=False): or not self._has_layer_dim() ): full_data = self._fill_const_layer(layer) * mult + elif grid_aux: + ilayer = (layer[0]) % nlay + iaux = int((layer[0]) / nlay) + full_data[iaux][ilayer] = self._fill_const_layer(layer)[0] * mult else: full_data[layer] = self._fill_const_layer(layer) * mult else: diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index 5e6f885865..d827b71892 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -778,7 +778,6 @@ def load_from_package( storage.init_layers(layers) elif grid_aux: - #if layer_shape[0] > 1: storage.layered = True auxidx = self._get_aux_var_index(arr_line[0]) if auxidx == 0: From 5735ecfc943404a3d7b44a7eca9bd0e94578efe0 Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 15 Sep 2025 08:55:41 -0400 Subject: [PATCH 33/44] edit netcdf tutorial --- .docs/Notebooks/mf6_netcdf01_tutorial.py | 131 +++++++++++++---------- 1 file changed, 77 insertions(+), 54 deletions(-) diff --git a/.docs/Notebooks/mf6_netcdf01_tutorial.py b/.docs/Notebooks/mf6_netcdf01_tutorial.py index 69067f0a18..a81f213a8d 100644 --- a/.docs/Notebooks/mf6_netcdf01_tutorial.py +++ b/.docs/Notebooks/mf6_netcdf01_tutorial.py @@ -7,24 +7,27 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.16.4 +# jupytext_version: 1.17.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- -# # MODFLOW 6: Generate MODFLOW 6 NetCDF input from existing FloPy sim +# # MODFLOW 6: MODFLOW 6 NetCDF inputs from FloPy simulation # -# ## NetCDF tutorial 1: MODFLOW 6 structured input file +# ## Write MODFLOW 6 NetCDF simulation # -# This tutorial shows how to generate a MODFLOW 6 NetCDF file from -# an existing FloPy simulation. Two methods will be demonstrated that -# generate a simulation with package data stored in a model NetCDF -# file. The first method is non-interactive- FloPy will generate the -# file with a modified `write_simulation()` call. 
The second method
-# is interactive, which provides an oppurtinity to modify the dataset
-# before it is written to NetCDF.
+# This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file
+# from an existing FloPy simulation. Two variations will be shown.
+# In the first, FloPy will generate the file with a modified
+# `write_simulation()` call. The second method is more interactive,
+# providing an opportunity to modify the dataset before it is written
+# to NetCDF.
+#
+# Support for generating a MODFLOW 6 NetCDF input without a defined
+# FloPy mf6 model or package instances is briefly discussed at the
+# end of the tutorial.
 #
 # For more information on supported MODFLOW 6 NetCDF formats see:
 # [MODFLOW NetCDF Format](https://github.com/MODFLOW-ORG/modflow6/wiki/MODFLOW-NetCDF-Format).
@@ -93,11 +96,15 @@

 # ## Load and run baseline simulation
 #
-# For the purposes of this tutorial, the specifics of this simulation
+# For the purposes of this tutorial, the specifics of the simulation
 # other than it is a candidate for NetCDF input are not a focus. It
 # is a NetCDF input candidate because it defines a supported model type
 # (`GWF6`) with a structured discretization and packages that support
 # NetCDF input parameters.
+#
+# More information about package NetCDF support in MODFLOW 6 can be
+# found in the `MODFLOW 6 - Description of Input and Output` (`mf6io.pdf`),
+# also available at the nightly build repository linked above.

 # load and run the non-netcdf simulation
 sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
@@ -108,11 +115,11 @@

 # ## Create NetCDF based simulation method 1
 #
-# This is the most straightforward way to create a NetCDF simulation
-# from the loaded ascii input simulation. Simply define the `netcdf`
-# argument to `write_simulation()` to be either `structured` or
-# `layered`, depending on the desired format of the generated NetCDF
-# file.
+# The most straightforward way to create a NetCDF simulation
+# from the loaded simulation is to provide a `netcdf` argument
+# to `write_simulation()` and define it to be either `structured`
+# or `layered`, depending on the desired format of the generated
+# NetCDF file.
 #
 # The name of the created file can be specified by first setting the
 # model `name_file.nc_filerecord` attribute to the desired name. If
@@ -129,9 +136,9 @@

 # ## Run MODFLOW 6 simulation with NetCDF input
 #
-# The simulation generated by this tutorial should be runnable by
-# Extended MODFLOW 6, available from the nightly-build repository
-# (linked above).
+# The simulation generated by this tutorial should run with the
+# Extended MODFLOW 6 executable, available from the nightly-build
+# repository (linked above).

 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)
@@ -148,29 +155,31 @@

 # ## Run MODFLOW 6 simulation with NetCDF input
 #
-# The simulation generated by this tutorial should be runnable by
-# Extended MODFLOW 6, available from the nightly-build repository
-# (linked above).
+# The simulation generated by this tutorial should run with the
+# Extended MODFLOW 6 executable, available from the nightly-build
+# repository (linked above).

 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)

 # ## Create NetCDF based simulation method 2
 #
+# In this method we will set the FloPy `netcdf` argument to `nofile`
+# when `write_simulation()` is called. As such, FloPy will not generate
+# the NetCDF file automatically.
We will manage the NetCDF file +# generation ourselves in method 2. +# # Reset the simulation path and set the `GWF` name file `nc_filerecord` # attribute to the name of the intended input NetCDF file. Display # the resultant name file changes. # # When we write the updated simulation, all packages that support NetCDF -# input parameters will be converted. We will therefore need to create a +# input parameters will be written such that NetCDF parameters expect the +# data source to be a NetCDF file. We will therefore need to create a # NetCDF input file containing arrays for the `DIS`, `NPF`, `IC`, `STO`, -# and `GHBG` packages. Data will be copied from the package objects into -# dataset arrays. -# -# Flopy will not generate the NetCDF input file when the `netcdf` argument -# to `write_simulation()` is set to `nofile`. This step is needed, however, -# to update ascii input with the keywords required to support the model -# NetCDF file that we will generate. +# and `GHBG` packages. We will still use FloPy package objects to set the +# parameter data in the dataset; however an `update_data=False` argument +# could be passed to the `update_dataset()` call if this was not desired. # create directory for netcdf sim sim.set_sim_path(workspace / "netcdf3") @@ -205,17 +214,28 @@ # ## Access model NetCDF attributes # -# Access model scoped NetCDF details by storing the dictionary -# returned from `netcdf_info()`. In particular, we need to set dataset -# scoped attributes that are stored in the model netcdf info dict. +# Internally, FloPy generates and uses NetCDF metadata dictionaries +# to update datasets. Both model and package objects can generate +# the dictionaries and they contain related but distinct NetCDF +# details. Model object dictionaries contain file scoped attribute +# information while package dictionaries are organized by NetCDF +# variable and contain variable scoped attribute information and +# details related to creating the variable, including dimensions, +# shape and data type. # -# First, retrieve and store the netcdf info dictionary and display +# The dictionaries are available via the `netcdf_info()` call. Their +# content also varies depending on the desired type of dataset (i.e. +# `structured` or `layered`). In this step we will access the model +# NetCDF metadata and use it to update dataset scoped attributes. +# +# First, retrieve and store the netcdf metadata dictionary and display # its contents. Then, in the following step, update the dataset with # the model scoped attributes defined in the dictionary. # -# These 2 operations can also be accomplished by calling `update_dataset()` -# on the model object. Analogous functions for the package are shown -# below. +# These operations can also both be accomplished by calling +# `update_dataset()` on the model object, albeit without the +# opportunity to modify the intermediate metadata dictionary. +# Examples of this approach (with package objects) are shown below. # get model netcdf info nc_info = gwf.netcdf_info() @@ -227,11 +247,11 @@ # ## Update the dataset with supported `DIS` arrays # -# Add NetCDF supported data arrays in package to dataset. Internally, this call -# uses a `netcdf_info()` package dictionary to determine candidate variables -# and relevant information about them. Alternatively, this dictionary can -# be directly accessed, updated, and passed to the `update_dataset()` function. -# That workflow will be demonstrated in the `NPF` package update which follows. 
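As the notes above say, pulling the model's `netcdf_info()` dictionary and
copying its `attrs` onto the dataset can also be collapsed into one call on
the model object, which updates every package as well. A short sketch using
the `gwf` model and `ds` dataset objects of this tutorial:

    # model-scoped attributes plus all package variables in one call
    ds = gwf.update_dataset(ds)

    # or create the variables but skip copying the array values
    ds = gwf.update_dataset(ds, update_data=False)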
+# First, we will show how package NetCDF parameters can be added to the
+# dataset without using the NetCDF metadata dictionary. We will use the
+# metadata dictionary when updating the dataset with NPF parameter data.
+#
+# Add NetCDF supported data arrays in package to dataset.

 # update dataset with `DIS` arrays
 dis = gwf.get_package("dis")
@@ -240,9 +260,7 @@

 # ## Access `NPF` package NetCDF attributes
 #
 # Access package scoped NetCDF details by storing the dictionary returned
-# from `netcdf_info()`. We need to set package variable attributes that are
-# stored in the package netcdf info dict, but we also need other information
-# that is relevant to creating the variables themselves.
+# from `netcdf_info()`.
 #
 # The contents of the info dictionary are shown and then, in the following
 # step, the dictionary and the dataset are passed to a helper routine that
@@ -253,12 +271,17 @@
 nc_info = npf.netcdf_info()
 pprint(nc_info)

-# ## Update package `netcdf_info` dictionary and dataset
+# ## Update package NetCDF metadata dictionary and dataset
+#
+# Here we update the metadata dictionary and then pass it directly to the
+# `update_dataset()` function which uses it when adding variables to the
+# dataset.
 #
-# Here we replace the default name for the `NPF K` input parameter and add
-# the `standard_name` attribute to it's attribute dictionary. The dictionary
-# is then passed to the `update_dataset()` function. Note the updated name
-# is used in the subsequent block when updating the array values.
+# We replace the default name for the `NPF K` input parameter and add
+# the `standard_name` attribute to its attribute dictionary. The
+# dictionary is then passed to the `update_dataset()` function. Note the
+# updated name is used in the subsequent block when updating the array
+# values.

 # update dataset with `NPF` arrays
 nc_info["k"]["varname"] = "npf_k_updated"
 nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation"
 ds = npf.update_dataset(ds, netcdf_info=nc_info)

 # ## Show dataset `NPF K` parameter with updates

@@ -302,14 +325,14 @@

 # ## Run MODFLOW 6 simulation with NetCDF input
 #
-# The simulation generated by this tutorial should be runnable by
-# Extended MODFLOW 6, available from the nightly-build repository
-# (linked above).
+# The simulation generated by this tutorial should run with the
+# Extended MODFLOW 6 executable, available from the nightly-build
+# repository (linked above).

 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)

-# ## Method 4: DIY with xarray
+# ## Method 3: DIY with xarray
 #
 # The above method still uses FloPy objects to update the dataset arrays
 # to values consistent with the state of the objects.
The `netcdf_info` From 30a9643adc8e55b73265e4ed879c1f4e7a82dafe Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 15 Sep 2025 09:20:48 -0400 Subject: [PATCH 34/44] fix merge --- flopy/mf6/utils/codegen/filters.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/flopy/mf6/utils/codegen/filters.py b/flopy/mf6/utils/codegen/filters.py index d0737543cf..8c0c6b7857 100644 --- a/flopy/mf6/utils/codegen/filters.py +++ b/flopy/mf6/utils/codegen/filters.py @@ -339,22 +339,7 @@ def _var(var: dict) -> List[str]: if k not in exclude ] - def __dfn(): - def _var(var: dict) -> List[str]: - exclude = ["description"] - name = var["name"] - subpkg = dfn.get("fkeys", dict()).get(name, None) - if subpkg: - var["construct_package"] = subpkg["abbr"] - var["construct_data"] = subpkg["val"] - var["parameter_name"] = subpkg["param"] - return [ - " ".join([k, v]).strip() - for k, v in var.items() - if k not in exclude - ] - - return [_var(var) for var in list(definition.values(multi=True))] + return [_var(var) for var in list(definition.values(multi=True))] return [["header"] + _meta()] + __dfn() From 2daca15af660630a44c1978157b170c5b5b63efe Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 15 Sep 2025 09:27:20 -0400 Subject: [PATCH 35/44] lint --- flopy/mf6/mfpackage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 7476b96cd5..781be275c7 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3623,7 +3623,7 @@ def netcdf_info(self, mesh=None): if self.dimensions.get_aux_variables(): auxnames = list(self.dimensions.get_aux_variables()[0]) - if len(auxnames) and auxnames[0] == "auxiliary": + if len(auxnames) > 0 and auxnames[0] == "auxiliary": auxnames.pop(0) else: auxnames = [] From 65a4df879e65b0a33ebdd98439d9ce8f18689c91 Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 15 Sep 2025 10:42:49 -0400 Subject: [PATCH 36/44] fix merge --- flopy/mf6/utils/codegen/filters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flopy/mf6/utils/codegen/filters.py b/flopy/mf6/utils/codegen/filters.py index 8c0c6b7857..f08d04a238 100644 --- a/flopy/mf6/utils/codegen/filters.py +++ b/flopy/mf6/utils/codegen/filters.py @@ -326,7 +326,7 @@ def _meta(): def __dfn(): def _var(var: dict) -> List[str]: - exclude = ["longname", "description"] + exclude = ["description"] name = var["name"] subpkg = dfn.get("fkeys", dict()).get(name, None) if subpkg: From 4633ae43238739863d0f7142d32c9a84b264989d Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 19 Sep 2025 14:22:09 -0400 Subject: [PATCH 37/44] support NCF encodings and grid mappings --- flopy/discretization/grid.py | 3 +- flopy/discretization/structuredgrid.py | 39 +++++- flopy/discretization/vertexgrid.py | 19 ++- flopy/mf6/mfmodel.py | 162 ++++++++++++++++++++----- flopy/mf6/mfpackage.py | 11 ++ flopy/utils/datautil.py | 7 +- 6 files changed, 206 insertions(+), 35 deletions(-) diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index c61968ad0a..d46f8577f1 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1288,7 +1288,7 @@ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs) ) return - def dataset(self, modeltime=None, mesh=None): + def dataset(self, modeltime=None, mesh=None, encoding=None): """ Method to generate baseline xarray dataset @@ -1296,6 +1296,7 @@ def dataset(self, modeltime=None, mesh=None): ---------- modeltime : FloPy ModelTime object mesh : mesh type + 
encoding : variable encoding dictionary Returns ------- diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 49298b9a98..e511cbf8b0 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1770,11 +1770,12 @@ def get_plottable_layer_array(self, a, layer): assert plotarray.shape == required_shape, msg return plotarray - def dataset(self, modeltime=None, mesh=None): + def dataset(self, modeltime=None, mesh=None, encoding=None): """ modeltime : FloPy ModelTime object mesh : mesh type valid mesh types are "layered" or None + encoding : variable encoding dictionary """ from ..utils import import_optional_dependency @@ -1787,11 +1788,11 @@ def dataset(self, modeltime=None, mesh=None): ds.attrs["modflow_grid"] = "STRUCTURED" if mesh and mesh.upper() == "LAYERED": - return self._layered_mesh_dataset(ds, modeltime) + return self._layered_mesh_dataset(ds, modeltime, encoding) elif mesh is None: - return self._structured_dataset(ds, modeltime) + return self._structured_dataset(ds, modeltime, encoding) - def _layered_mesh_dataset(self, ds, modeltime=None): + def _layered_mesh_dataset(self, ds, modeltime=None, encoding=None): FILLNA_INT32 = np.int32(-2147483647) FILLNA_DBL = 9.96920996838687e36 lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} @@ -1894,9 +1895,25 @@ def _layered_mesh_dataset(self, ds, modeltime=None): ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + # wkt override to existing crs + ds["projection"].attrs["wkt"] = encoding["wkt"] + ds["mesh_node_x"].attrs["grid_mapping"] = "projection" + ds["mesh_node_y"].attrs["grid_mapping"] = "projection" + ds["mesh_face_x"].attrs["grid_mapping"] = "projection" + ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + elif self.crs is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + ds["projection"].attrs["wkt"] = self.crs.to_wkt() + ds["mesh_node_x"].attrs["grid_mapping"] = "projection" + ds["mesh_node_y"].attrs["grid_mapping"] = "projection" + ds["mesh_face_x"].attrs["grid_mapping"] = "projection" + ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + return ds - def _structured_dataset(self, ds, modeltime=None): + def _structured_dataset(self, ds, modeltime=None, encoding=None): lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} x = self.xoffset + self.xycenters[0] @@ -1953,6 +1970,18 @@ def _structured_dataset(self, ds, modeltime=None): ds["x"].attrs["long_name"] = "Easting" ds["x"].attrs["bounds"] = "x_bnds" + if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + # wkt override to existing crs + ds["projection"].attrs["crs_wkt"] = encoding["wkt"] + ds["x"].attrs["grid_mapping"] = "projection" + ds["y"].attrs["grid_mapping"] = "projection" + elif self.crs is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + ds["projection"].attrs["crs_wkt"] = self.crs.to_wkt() + ds["x"].attrs["grid_mapping"] = "projection" + ds["y"].attrs["grid_mapping"] = "projection" + return ds def _set_structured_iverts(self): diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 47f0c17b36..854d41c67e 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -600,12 +600,13 @@ def get_plottable_layer_array(self, a, 
layer): assert plotarray.shape == required_shape, msg return plotarray - def dataset(self, modeltime=None, mesh=None): + def dataset(self, modeltime=None, mesh=None, encoding=None): """ modeltime : FloPy ModelTime object mesh : mesh type valid mesh types are "layered" or None VertexGrid objects only support layered mesh + encoding : variable encoding dictionary """ from ..utils import import_optional_dependency @@ -717,6 +718,22 @@ def dataset(self, modeltime=None, mesh=None): ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) + if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + # wkt override to existing crs + ds["projection"].attrs["wkt"] = encoding["wkt"] + ds["mesh_node_x"].attrs["grid_mapping"] = "projection" + ds["mesh_node_y"].attrs["grid_mapping"] = "projection" + ds["mesh_face_x"].attrs["grid_mapping"] = "projection" + ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + elif self.crs is not None: + ds = ds.assign({"projection": ([], np.int32(1))}) + ds["projection"].attrs["wkt"] = self.crs.to_wkt() + ds["mesh_node_x"].attrs["grid_mapping"] = "projection" + ds["mesh_node_y"].attrs["grid_mapping"] = "projection" + ds["mesh_face_x"].attrs["grid_mapping"] = "projection" + ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + return ds # initialize grid from a grb file diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 6047b56b00..52bb82661d 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1313,7 +1313,6 @@ def write( ext_file_action=ExtFileAction.copy_relative_paths, netcdf=None, ): - from ..version import __version__ """ Writes out model's package files. @@ -1359,7 +1358,10 @@ def write( if write_netcdf: # set data storage to write ascii for netcdf pp._set_netcdf_storage() - + if pp.package_type.startswith("dis"): + crs = pp.crs.get_data() + if crs is not None and self.modelgrid.crs is None: + self.modelgrid.crs = crs[0][1] if ( self.simulation_data.verbosity_level.value >= VerbosityLevel.normal.value @@ -1372,35 +1374,11 @@ def write( # write netcdf file if write_netcdf and netcdf.lower() != "nofile": - mesh = netcdf - if mesh.upper() == "STRUCTURED": - mesh = None - - ds = self.modelgrid.dataset( - modeltime=self.modeltime, - mesh=mesh, - ) - - nc_info = self.netcdf_info(mesh=mesh) - nc_info["attrs"]["title"] = f"{self.name.upper()} input" - nc_info["attrs"]["source"] = f"flopy {__version__}" - # :history = "first created 2025/8/21 9:46:2.909" ; - # :Conventions = "CF-1.11 UGRID-1.0" ; - ds = self.update_dataset(ds, netcdf_info=nc_info, mesh=mesh) - - # write dataset to netcdf - fname = self.name_file.nc_filerecord.get_data()[0][0] - ds.to_netcdf( - os.path.join(self.model_ws, fname), - format="NETCDF4", - engine="netcdf4" - ) - + self._write_netcdf(mesh=netcdf) if nc_fname is not None: self.name_file.nc_filerecord = None - def get_grid_type(self): """ Return the type of grid used by model 'model_name' in simulation @@ -2316,12 +2294,142 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True) else: nc_info = netcdf_info + if ( + self.simulation.simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + print(f" updating model dataset...") + for a in nc_info["attrs"]: dataset.attrs[a] = nc_info["attrs"][a] # add all packages and update data for p in self.packagelist: # add package var to dataset + if ( + self.simulation.simulation_data.verbosity_level.value + 
>= VerbosityLevel.normal.value
+            ):
+                print(f"    updating dataset for package {p._get_pname()}...")
             dataset = p.update_dataset(dataset, mesh=mesh, update_data=update_data)

         return dataset
+
+    def _write_netcdf(self, mesh=None):
+        import datetime
+
+        from ..version import __version__
+        if mesh is not None and mesh.upper() == "STRUCTURED":
+            mesh = None
+
+        encode = {}
+        for pp in self.packagelist:
+            if pp.package_type == "ncf":
+                encode["shuffle"] = pp.shuffle.get_data()
+                encode["deflate"] = pp.deflate.get_data()
+                encode["chunk_time"] = pp.chunk_time.get_data()
+                encode["chunk_face"] = pp.chunk_face.get_data()
+                encode["chunk_x"] = pp.chunk_x.get_data()
+                encode["chunk_y"] = pp.chunk_y.get_data()
+                encode["chunk_z"] = pp.chunk_z.get_data()
+                wkt = pp.wkt.get_data()
+                if wkt is not None:
+                    wkt = wkt[0][1]
+                encode["wkt"] = wkt
+
+        if (
+            self.simulation.simulation_data.verbosity_level.value
+            >= VerbosityLevel.normal.value
+        ):
+            print(f"  creating model dataset...")
+
+        ds = self.modelgrid.dataset(
+            modeltime=self.modeltime,
+            mesh=mesh,
+            encoding=encode,
+        )
+
+        dt = datetime.datetime.now()
+        timestamp = dt.strftime("%m/%d/%Y %H:%M:%S")
+
+        nc_info = self.netcdf_info(mesh=mesh)
+        nc_info["attrs"]["title"] = f"{self.name.upper()} input"
+        nc_info["attrs"]["source"] = f"flopy {__version__}"
+        nc_info["attrs"]["history"] = f"first created {timestamp}"
+        if mesh is None:
+            nc_info["attrs"]["Conventions"] = "CF-1.11"
+        elif mesh.upper() == "LAYERED":
+            nc_info["attrs"]["Conventions"] = "CF-1.11 UGRID-1.0"
+
+        ds = self.update_dataset(
+            ds,
+            netcdf_info=nc_info,
+            mesh=mesh,
+        )
+
+        chunk = False
+        chunk_t = False
+        if mesh is None:
+            if (
+                "chunk_x" in encode
+                and encode["chunk_x"] is not None
+                and "chunk_y" in encode
+                and encode["chunk_y"] is not None
+                and "chunk_z" in encode
+                and encode["chunk_z"] is not None
+            ):
+                chunk = True
+        elif mesh.upper() == "LAYERED":
+            if "chunk_face" in encode and encode["chunk_face"] is not None:
+                chunk = True
+        if "chunk_time" in encode and encode["chunk_time"] is not None:
+            chunk_t = True
+
+        base_encode = {}
+        if "deflate" in encode and encode["deflate"] is not None:
+            base_encode["zlib"] = True
+            base_encode["complevel"] = encode["deflate"]
+        if "shuffle" in encode and encode["shuffle"] is not None:
+            base_encode["shuffle"] = True
+
+        encoding = {}
+        chunk_dims = {'time', 'nmesh_face', 'z', 'y', 'x'}
+        for varname, da in ds.data_vars.items():
+            dims = ds.data_vars[varname].dims
+            codes = dict(base_encode)
+            if (
+                not set(dims).issubset(chunk_dims)
+                or not chunk or not chunk_t
+            ):
+                encoding[varname] = codes
+                continue
+            chunksizes = []
+            if "time" in dims:
+                chunksizes.append(encode["chunk_time"])
+            if mesh is None:
+                if "z" in dims:
+                    chunksizes.append(encode["chunk_z"])
+                if "y" in dims:
+                    chunksizes.append(encode["chunk_y"])
+                if "x" in dims:
+                    chunksizes.append(encode["chunk_x"])
+            elif mesh.upper() == "LAYERED" and "nmesh_face" in dims:
+                chunksizes.append(encode["chunk_face"])
+            if len(chunksizes) > 0:
+                codes["chunksizes"] = chunksizes
+            encoding[varname] = codes
+
+        fname = self.name_file.nc_filerecord.get_data()[0][0]
+
+        if (
+            self.simulation.simulation_data.verbosity_level.value
+            >= VerbosityLevel.normal.value
+        ):
+            print(f"  writing NetCDF file {fname}...")
+        # write dataset to netcdf
+        ds.to_netcdf(
+            os.path.join(self.model_ws, fname),
+            format="NETCDF4",
+            engine="netcdf4",
+            encoding=encoding,
+        )
diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py
index 781be275c7..67ae944a16 100644
--- a/flopy/mf6/mfpackage.py
+++
b/flopy/mf6/mfpackage.py @@ -3760,6 +3760,8 @@ def _data_shape(shape): return dims_l + projection = "projection" in dataset.data_vars + last_path = '' pitem = None pdata = None @@ -3774,6 +3776,15 @@ def _data_shape(shape): dataset = dataset.assign(var_d) for a in nc_info[v]["attrs"]: dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + if projection: + dims = dataset[varname].dims + if "nmesh_face" in dims or "nmesh_node" in dims: + dataset[varname].attrs["grid_mapping"] = "projection" + dataset[varname].attrs["coordinates"] = "mesh_face_x mesh_face_y" + elif mesh is None and len(dims) > 1: + # TODO don't set if lon / lat? + dataset[varname].attrs["grid_mapping"] = "projection" + dataset[varname].attrs["coordinates"] = "x y" if update_data: path = nc_info[v]["attrs"]["modflow_input"] diff --git a/flopy/utils/datautil.py b/flopy/utils/datautil.py index 011a4323df..1acbb8dec8 100644 --- a/flopy/utils/datautil.py +++ b/flopy/utils/datautil.py @@ -305,6 +305,7 @@ def reset_delimiter_used(): @staticmethod def split_data_line(line, external_file=False, delimiter_conf_length=15): + no_split_keys = ['crs', 'wkt'] if PyListUtil.line_num > delimiter_conf_length and PyListUtil.consistent_delim: # consistent delimiter has been found. continue using that # delimiter without doing further checks @@ -358,7 +359,11 @@ def split_data_line(line, external_file=False, delimiter_conf_length=15): max_split_size = len(max_split_list) max_split_type = "combo" - if max_split_type is not None and max_split_size > 1: + if ( + max_split_type is not None + and max_split_size > 1 + and clean_line[0].lower() not in no_split_keys + ): clean_line = max_split_list if PyListUtil.line_num == 0: PyListUtil.delimiter_used = max_split_type From 43cfd648e91ebde1d06e99bc62123638d9b9e9ac Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 19 Sep 2025 14:32:04 -0400 Subject: [PATCH 38/44] check crs attr --- flopy/mf6/mfmodel.py | 5 ++++- flopy/utils/datautil.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 52bb82661d..289b556cf3 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1358,7 +1358,10 @@ def write( if write_netcdf: # set data storage to write ascii for netcdf pp._set_netcdf_storage() - if pp.package_type.startswith("dis"): + if ( + pp.package_type.startswith("dis") + and hasattr(pp, "crs") + ): crs = pp.crs.get_data() if crs is not None and self.modelgrid.crs is None: self.modelgrid.crs = crs[0][1] diff --git a/flopy/utils/datautil.py b/flopy/utils/datautil.py index 1acbb8dec8..e198773d42 100644 --- a/flopy/utils/datautil.py +++ b/flopy/utils/datautil.py @@ -305,7 +305,7 @@ def reset_delimiter_used(): @staticmethod def split_data_line(line, external_file=False, delimiter_conf_length=15): - no_split_keys = ['crs', 'wkt'] + no_split_keys = ["crs", "wkt"] if PyListUtil.line_num > delimiter_conf_length and PyListUtil.consistent_delim: # consistent delimiter has been found. 
continue using that # delimiter without doing further checks From b367bd62e74b3382a254c91d9fe9f4d4ac2df20b Mon Sep 17 00:00:00 2001 From: mjreno Date: Mon, 22 Sep 2025 15:04:51 -0400 Subject: [PATCH 39/44] support structured lat/lon --- .docs/Notebooks/mf6_netcdf01_tutorial.py | 42 ++--- autotest/regression/test_model_netcdf.py | 30 ++-- flopy/discretization/grid.py | 4 +- flopy/discretization/structuredgrid.py | 119 ++++++++++---- flopy/discretization/vertexgrid.py | 26 +-- flopy/mf6/mfmodel.py | 101 ++++++------ flopy/mf6/mfpackage.py | 198 ++++++++++++----------- flopy/mf6/mfsimbase.py | 2 +- 8 files changed, 300 insertions(+), 222 deletions(-) diff --git a/.docs/Notebooks/mf6_netcdf01_tutorial.py b/.docs/Notebooks/mf6_netcdf01_tutorial.py index a81f213a8d..9ecc33c9a8 100644 --- a/.docs/Notebooks/mf6_netcdf01_tutorial.py +++ b/.docs/Notebooks/mf6_netcdf01_tutorial.py @@ -164,10 +164,10 @@ # ## Create NetCDF based simulation method 2 # -# In this method we will set the FloPy `netcdf` argument to `nofile` -# when `write_simulation()` is called. As such, FloPy will not generate -# the NetCDF file automatically. We will manage the NetCDF file -# generation ourselves in method 2. +# In this method we will set the FloPy `netcdf` argument to `` (empty +# string) when `write_simulation()` is called. As such, FloPy will +# not generate the NetCDF file automatically. We will manage the NetCDF +# file generation ourselves in method 2. # # Reset the simulation path and set the `GWF` name file `nc_filerecord` # attribute to the name of the intended input NetCDF file. Display @@ -188,7 +188,7 @@ gwf.name_file.nc_filerecord = "uzf01.structured.nc" # write simulation with ASCII inputs tagged for NetCDF # but do not create NetCDF file -sim.write_simulation(netcdf="nofile") +sim.write_simulation(netcdf="") # ## Show name file with NetCDF input configured @@ -223,7 +223,7 @@ # details related to creating the variable, including dimensions, # shape and data type. # -# The dictionaries are available via the `netcdf_info()` call. Their +# The dictionaries are available via the `netcdf_meta()` call. Their # content also varies depending on the desired type of dataset (i.e. # `structured` or `layered`). In this step we will access the model # NetCDF metadata and use it to update dataset scoped attributes. @@ -238,12 +238,12 @@ # Examples of this approach (with package objects) are shown below. # get model netcdf info -nc_info = gwf.netcdf_info() -pprint(nc_info) +nc_meta = gwf.netcdf_meta() +pprint(nc_meta) # update dataset directly with required attributes -for a in nc_info["attrs"]: - ds.attrs[a] = nc_info["attrs"][a] +for a in nc_meta["attrs"]: + ds.attrs[a] = nc_meta["attrs"][a] # ## Update the dataset with supported `DIS` arrays # @@ -260,7 +260,7 @@ # ## Access `NPF` package NetCDF attributes # # Access package scoped NetCDF details by storing the dictionary returned -# from `netcdf_info()`. +# from `netcdf_meta()`. # # The contents of the info dictionary are shown and then, in the following # step, the dictionary and the dataset are passed to a helper routine that @@ -268,8 +268,8 @@ # get npf package netcdf info npf = gwf.get_package("npf") -nc_info = npf.netcdf_info() -pprint(nc_info) +nc_meta = npf.netcdf_meta() +pprint(nc_meta) # ## Update package NetCDF metadata dictionary and dataset # @@ -284,9 +284,9 @@ # values. 
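For orientation, each entry in the metadata dictionary couples a default
`varname` with the information needed to create the variable: its dimension
list, its type, and a variable-scoped attribute dictionary. A sketch of what
one `NPF K` entry might look like for a structured dataset; every value shown
here is illustrative rather than captured FloPy output:

    nc_meta = {
        "k": {
            "varname": "npf_k",  # default name, overridden below
            "nc_shape": ["z", "y", "x"],  # structured grid dimensions
            "nc_type": "f8",  # illustrative dtype spelling
            "attrs": {
                "modflow_input": "npf/k",  # hypothetical input path string
                "_FillValue": 9.96920996838687e36,
            },
        },
    }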
# update dataset with `NPF` arrays -nc_info["k"]["varname"] = "npf_k_updated" -nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" -ds = npf.update_dataset(ds, netcdf_info=nc_info) +nc_meta["k"]["varname"] = "npf_k_updated" +nc_meta["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" +ds = npf.update_dataset(ds, netcdf_meta=nc_meta) # ## Show dataset `NPF K` parameter with updates @@ -335,7 +335,7 @@ # ## Method 3: DIY with xarray # # The above method still uses FloPy objects to update the dataset arrays -# to values consistent with the state of the objects. The `netcdf_info` +# to values consistent with the state of the objects. The `netcdf_meta` # dictionary is intended to supported creation of the dataset without # an existing simulation defined. The base dataset can be defined with # `modelgrid` and `modeltime` objects, while the full package netcdf @@ -345,16 +345,16 @@ # ## Demonstrate static call on MFPackage (structured dataset): -netcdf_info = flopy.mf6.mfpackage.MFPackage.netcdf_package( +netcdf_meta = flopy.mf6.mfpackage.MFPackage.netcdf_package( mtype="GWF", ptype="NPF", auxiliary=["CONCENTRATION"], ) -pprint(netcdf_info) +pprint(netcdf_meta) # ## Demonstrate static call on MFPackage (layered dataset): -netcdf_info = flopy.mf6.mfpackage.MFPackage.netcdf_package( +netcdf_meta = flopy.mf6.mfpackage.MFPackage.netcdf_package( mtype="GWF", ptype="NPF", mesh="LAYERED", auxiliary=["CONCENTRATION"], nlay=2 ) -pprint(netcdf_info) +pprint(netcdf_meta) diff --git a/autotest/regression/test_model_netcdf.py b/autotest/regression/test_model_netcdf.py index 6539ff7a34..0f906f7fc4 100644 --- a/autotest/regression/test_model_netcdf.py +++ b/autotest/regression/test_model_netcdf.py @@ -87,7 +87,7 @@ def check_netcdf(path, mobj, mesh=None): @pytest.mark.regression def test_uzf01_model_scope_nofile(function_tmpdir, example_data_path): sim_name = "uzf01" - netcdf = "nofile" + netcdf = "" fname = f"{sim_name}.structured.nc" data_path_base = example_data_path / "mf6" / "netcdf" ws = function_tmpdir / sim_name @@ -108,7 +108,7 @@ def test_uzf01_model_scope_nofile(function_tmpdir, example_data_path): @pytest.mark.regression def test_uzf02_model_scope_nofile(function_tmpdir, example_data_path): sim_name = "uzf02" - netcdf = "nofile" + netcdf = "" fname = f"{sim_name}.input.nc" # default data_path_base = example_data_path / "mf6" / "netcdf" ws = function_tmpdir / sim_name @@ -230,7 +230,7 @@ def test_uzf02_sim_scope_fname(function_tmpdir, example_data_path): @pytest.mark.regression def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path): sim_name = "uzf01" - netcdf = "nofile" + netcdf = "" fname = f"{sim_name}.structured.nc" data_path_base = example_data_path / "mf6" / "netcdf" ws = function_tmpdir / sim_name @@ -258,7 +258,7 @@ def test_uzf01_model_scope_nomesh(function_tmpdir, example_data_path): @pytest.mark.regression def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path): sim_name = "uzf01" - netcdf = "nofile" + netcdf = "" mesh = "layered" fname = f"{sim_name}.layered.nc" data_path_base = example_data_path / "mf6" / "netcdf" @@ -287,7 +287,7 @@ def test_uzf01_model_scope_mesh(function_tmpdir, example_data_path): @pytest.mark.regression def test_uzf02_model_scope(function_tmpdir, example_data_path): sim_name = "uzf02" - netcdf = "nofile" + netcdf = "" mesh = "layered" fname = f"{sim_name}.layered.nc" data_path_base = example_data_path / "mf6" / "netcdf" @@ -335,11 +335,11 @@ def test_uzf01_pkg_scope(function_tmpdir, 
example_data_path): ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime) # get model netcdf info - nc_info = gwf.netcdf_info() + nc_meta = gwf.netcdf_meta() # update dataset directly with required attributes - for a in nc_info["attrs"]: - ds.attrs[a] = nc_info["attrs"][a] + for a in nc_meta["attrs"]: + ds.attrs[a] = nc_meta["attrs"][a] # add all packages and update data for p in gwf.packagelist: @@ -373,11 +373,11 @@ def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path): ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime) # get model netcdf info - nc_info = gwf.netcdf_info() + nc_meta = gwf.netcdf_meta() # update dataset directly with required attributes - for a in nc_info["attrs"]: - ds.attrs[a] = nc_info["attrs"][a] + for a in nc_meta["attrs"]: + ds.attrs[a] = nc_meta["attrs"][a] # update dataset with `DIS` arrays dis = gwf.get_package("dis") @@ -385,13 +385,13 @@ def test_uzf01_pkg_scope_modify(function_tmpdir, example_data_path): # get npf package netcdf info npf = gwf.get_package("npf") - nc_info = npf.netcdf_info() + nc_meta = npf.netcdf_meta() # update dataset with `NPF` arrays # change k varname and add attribute - nc_info["k"]["varname"] = "npf_k_updated" - nc_info["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" - ds = npf.update_dataset(ds, netcdf_info=nc_info) + nc_meta["k"]["varname"] = "npf_k_updated" + nc_meta["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" + ds = npf.update_dataset(ds, netcdf_meta=nc_meta) # ic ic = gwf.get_package("ic") diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index d46f8577f1..beba293039 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1288,7 +1288,7 @@ def write_shapefile(self, filename="grid.shp", crs=None, prjfile=None, **kwargs) ) return - def dataset(self, modeltime=None, mesh=None, encoding=None): + def dataset(self, modeltime=None, mesh=None, configuration=None): """ Method to generate baseline xarray dataset @@ -1296,7 +1296,7 @@ def dataset(self, modeltime=None, mesh=None, encoding=None): ---------- modeltime : FloPy ModelTime object mesh : mesh type - encoding : variable encoding dictionary + configuration : configuration dictionary Returns ------- diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index e511cbf8b0..96b74d3f1c 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1770,12 +1770,12 @@ def get_plottable_layer_array(self, a, layer): assert plotarray.shape == required_shape, msg return plotarray - def dataset(self, modeltime=None, mesh=None, encoding=None): + def dataset(self, modeltime=None, mesh=None, configuration=None): """ modeltime : FloPy ModelTime object mesh : mesh type valid mesh types are "layered" or None - encoding : variable encoding dictionary + configuration : configuration dictionary """ from ..utils import import_optional_dependency @@ -1788,11 +1788,11 @@ def dataset(self, modeltime=None, mesh=None, encoding=None): ds.attrs["modflow_grid"] = "STRUCTURED" if mesh and mesh.upper() == "LAYERED": - return self._layered_mesh_dataset(ds, modeltime, encoding) + return self._layered_mesh_dataset(ds, modeltime, configuration) elif mesh is None: - return self._structured_dataset(ds, modeltime, encoding) + return self._structured_dataset(ds, modeltime, configuration) - def _layered_mesh_dataset(self, ds, modeltime=None, encoding=None): + def _layered_mesh_dataset(self, ds, modeltime=None, 
configuration=None): FILLNA_INT32 = np.int32(-2147483647) FILLNA_DBL = 9.96920996838687e36 lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} @@ -1895,29 +1895,31 @@ def _layered_mesh_dataset(self, ds, modeltime=None, encoding=None): ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) - if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: - ds = ds.assign({"projection": ([], np.int32(1))}) - # wkt override to existing crs - ds["projection"].attrs["wkt"] = encoding["wkt"] + wkt_configured = ( + configuration is not None + and "wkt" in configuration + and configuration["wkt"] is not None + ) + + if wkt_configured or self.crs is not None: ds["mesh_node_x"].attrs["grid_mapping"] = "projection" ds["mesh_node_y"].attrs["grid_mapping"] = "projection" ds["mesh_face_x"].attrs["grid_mapping"] = "projection" ds["mesh_face_y"].attrs["grid_mapping"] = "projection" - elif self.crs is not None: ds = ds.assign({"projection": ([], np.int32(1))}) - ds["projection"].attrs["wkt"] = self.crs.to_wkt() - ds["mesh_node_x"].attrs["grid_mapping"] = "projection" - ds["mesh_node_y"].attrs["grid_mapping"] = "projection" - ds["mesh_face_x"].attrs["grid_mapping"] = "projection" - ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + if wkt_configured: + # wkt override to existing crs + ds["projection"].attrs["wkt"] = configuration["wkt"] + else: + ds["projection"].attrs["wkt"] = self.crs.to_wkt() return ds - def _structured_dataset(self, ds, modeltime=None, encoding=None): + def _structured_dataset(self, ds, modeltime=None, configuration=None): lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} - x = self.xoffset + self.xycenters[0] - y = self.yoffset + self.xycenters[1] + xc = self.xoffset + self.xycenters[0] + yc = self.yoffset + self.xycenters[1] z = [float(x) for x in range(1, self.nlay + 1)] # set coordinate var bounds @@ -1943,8 +1945,8 @@ def _structured_dataset(self, ds, modeltime=None, encoding=None): var_d = { "time": (["time"], modeltime.totim), "z": (["z"], z), - "y": (["y"], y), - "x": (["x"], x), + "y": (["y"], yc), + "x": (["x"], xc), } ds = ds.assign(var_d) @@ -1970,17 +1972,78 @@ def _structured_dataset(self, ds, modeltime=None, encoding=None): ds["x"].attrs["long_name"] = "Easting" ds["x"].attrs["bounds"] = "x_bnds" - if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: - ds = ds.assign({"projection": ([], np.int32(1))}) - # wkt override to existing crs - ds["projection"].attrs["crs_wkt"] = encoding["wkt"] + latlon_cfg = ( + configuration is not None + and "latitude" in configuration + and configuration["latitude"] is not None + and "longitude" in configuration + and configuration["longitude"] is not None + ) + + if latlon_cfg or self.crs is not None: + if latlon_cfg: + lats = configuration["latitude"] + lons = configuration["longitude"] + else: + try: + import warnings + + from pyproj import Proj + + epsg_code = self.crs.to_epsg(min_confidence=90) + proj = Proj( + f"EPSG:{epsg_code}", + ) + + lats = [] + lons = [] + x_local = [] + y_local = [] + for y in self.xycenters[1]: + for x in self.xycenters[0]: + x_local.append(x) + y_local.append(y) + + x_global, y_global = self.get_coords(x_local, y_local) + + for i, x in enumerate(x_global): + lon, lat = proj(x, y_global[i], inverse=True) + lats.append(lat) + lons.append(lon) + + lats = np.array(lats) + lons = np.array(lons) + + except Exception as e: + warnings.warn( + f"Cannot create coordinates from CRS: {e}", + UserWarning, + ) + + # create 
coordinate vars + var_d = { + "lat": (["y", "x"], lats.reshape(yc.size, xc.size)), + "lon": (["y", "x"], lons.reshape(yc.size, xc.size)), + } + ds = ds.assign(var_d) + + # set coordinate attributes + ds["lat"].attrs["units"] = "degrees_north" + ds["lat"].attrs["standard_name"] = "latitude" + ds["lat"].attrs["long_name"] = "latitude" + ds["lon"].attrs["units"] = "degrees_east" + ds["lon"].attrs["standard_name"] = "longitude" + ds["lon"].attrs["long_name"] = "longitude" + + elif ( + configuration is not None + and "wkt" in configuration + and configuration["wkt"] is not None + ): ds["x"].attrs["grid_mapping"] = "projection" ds["y"].attrs["grid_mapping"] = "projection" - elif self.crs is not None: ds = ds.assign({"projection": ([], np.int32(1))}) - ds["projection"].attrs["crs_wkt"] = self.crs.to_wkt() - ds["x"].attrs["grid_mapping"] = "projection" - ds["y"].attrs["grid_mapping"] = "projection" + ds["projection"].attrs["crs_wkt"] = configuration["wkt"] return ds diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 854d41c67e..efd82a61bf 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -600,13 +600,13 @@ def get_plottable_layer_array(self, a, layer): assert plotarray.shape == required_shape, msg return plotarray - def dataset(self, modeltime=None, mesh=None, encoding=None): + def dataset(self, modeltime=None, mesh=None, configuration=None): """ modeltime : FloPy ModelTime object mesh : mesh type valid mesh types are "layered" or None VertexGrid objects only support layered mesh - encoding : variable encoding dictionary + configuration : configuration dictionary """ from ..utils import import_optional_dependency @@ -718,21 +718,23 @@ def dataset(self, modeltime=None, mesh=None, encoding=None): ds["mesh_face_nodes"].attrs["_FillValue"] = FILLNA_INT32 ds["mesh_face_nodes"].attrs["start_index"] = np.int32(1) - if encoding is not None and "wkt" in encoding and encoding["wkt"] is not None: - ds = ds.assign({"projection": ([], np.int32(1))}) - # wkt override to existing crs - ds["projection"].attrs["wkt"] = encoding["wkt"] + wkt_configured = ( + configuration is not None + and "wkt" in configuration + and configuration["wkt"] is not None + ) + + if wkt_configured or self.crs is not None: ds["mesh_node_x"].attrs["grid_mapping"] = "projection" ds["mesh_node_y"].attrs["grid_mapping"] = "projection" ds["mesh_face_x"].attrs["grid_mapping"] = "projection" ds["mesh_face_y"].attrs["grid_mapping"] = "projection" - elif self.crs is not None: ds = ds.assign({"projection": ([], np.int32(1))}) - ds["projection"].attrs["wkt"] = self.crs.to_wkt() - ds["mesh_node_x"].attrs["grid_mapping"] = "projection" - ds["mesh_node_y"].attrs["grid_mapping"] = "projection" - ds["mesh_face_x"].attrs["grid_mapping"] = "projection" - ds["mesh_face_y"].attrs["grid_mapping"] = "projection" + if wkt_configured: + # wkt override to existing crs + ds["projection"].attrs["wkt"] = configuration["wkt"] + else: + ds["projection"].attrs["wkt"] = self.crs.to_wkt() return ds diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 289b556cf3..0600c9bf4e 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1324,13 +1324,16 @@ def write( with relative paths, leaving files defined by absolute paths fixed. netcdf : str ASCII package files will be written as configured for NetCDF input. - 'layered', 'structured' and 'nofile' are supported arguments. + 'layered', 'structured' and '' (empty string) are supported arguments. 
""" - write_netcdf = netcdf and hasattr(self.name_file, "nc_filerecord") + write_netcdf = ( + hasattr(self.name_file, "nc_filerecord") + and netcdf is not None + ) if write_netcdf: - # update name file for input even if "nofile" is configured + # update name file for input even if "" is configured nc_fname = None if self.name_file.nc_filerecord.get_data() is None: # update name file for netcdf input @@ -1376,7 +1379,7 @@ def write( pp._set_netcdf_storage(reset=True) # write netcdf file - if write_netcdf and netcdf.lower() != "nofile": + if write_netcdf and netcdf != "": self._write_netcdf(mesh=netcdf) if nc_fname is not None: self.name_file.nc_filerecord = None @@ -2280,7 +2283,7 @@ def netcdf_model(mname, mtype, grid_type, mesh=None): return {"attrs": attrs} - def netcdf_info(self, mesh=None): + def netcdf_meta(self, mesh=None): """Return dictionary of dataset (model) scoped attributes Parameters ---------- @@ -2291,11 +2294,11 @@ def netcdf_info(self, mesh=None): self.name, self.model_type, self.get_grid_type(), mesh ) - def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True): - if netcdf_info is None: - nc_info = self.netcdf_info(mesh=mesh) + def update_dataset(self, dataset, netcdf_meta=None, mesh=None, update_data=True): + if netcdf_meta is None: + nc_meta = self.netcdf_meta(mesh=mesh) else: - nc_info = netcdf_info + nc_meta = netcdf_meta if ( self.simulation.simulation_data.verbosity_level.value @@ -2303,8 +2306,8 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True) ): print(f" updating model dataset...") - for a in nc_info["attrs"]: - dataset.attrs[a] = nc_info["attrs"][a] + for a in nc_meta["attrs"]: + dataset.attrs[a] = nc_meta["attrs"][a] # add all packages and update data for p in self.packagelist: @@ -2325,20 +2328,22 @@ def _write_netcdf(self, mesh=None): if mesh is not None and mesh.upper() == "STRUCTURED": mesh = None - encode = {} + config = {} for pp in self.packagelist: if pp.package_type == "ncf": - encode["shuffle"] = pp.shuffle.get_data() - encode["deflate"] = pp.deflate.get_data() - encode["chunk_time"] = pp.chunk_time.get_data() - encode["chunk_face"] = pp.chunk_face.get_data() - encode["chunk_x"] = pp.chunk_x.get_data() - encode["chunk_y"] = pp.chunk_y.get_data() - encode["chunk_z"] = pp.chunk_z.get_data() + config["shuffle"] = pp.shuffle.get_data() + config["deflate"] = pp.deflate.get_data() + config["chunk_time"] = pp.chunk_time.get_data() + config["chunk_face"] = pp.chunk_face.get_data() + config["chunk_x"] = pp.chunk_x.get_data() + config["chunk_y"] = pp.chunk_y.get_data() + config["chunk_z"] = pp.chunk_z.get_data() wkt = pp.wkt.get_data() if wkt is not None: wkt = wkt[0][1] - encode["wkt"] = wkt + config["wkt"] = wkt + config["latitude"] = pp.latitude.get_data() + config["longitude"] = pp.longitude.get_data() if ( self.simulation.simulation_data.verbosity_level.value @@ -2349,24 +2354,24 @@ def _write_netcdf(self, mesh=None): ds = self.modelgrid.dataset( modeltime=self.modeltime, mesh=mesh, - encoding=encode, + configuration=config, ) dt = datetime.datetime.now() timestamp = dt.strftime("%m/%d/%Y %H:%M:%S") - nc_info = self.netcdf_info(mesh=mesh) - nc_info["attrs"]["title"] = f"{self.name.upper()} input" - nc_info["attrs"]["source"] = f"flopy {__version__}" - nc_info["attrs"]["history"] = f"first created {timestamp}" + nc_meta = self.netcdf_meta(mesh=mesh) + nc_meta["attrs"]["title"] = f"{self.name.upper()} input" + nc_meta["attrs"]["source"] = f"flopy {__version__}" + nc_meta["attrs"]["history"] = f"first 
created {timestamp}" if mesh is None: - nc_info["attrs"]["Conventions"] = "CF-1.11" + nc_meta["attrs"]["Conventions"] = "CF-1.11" elif mesh.upper() is "LAYERED": - nc_info["attrs"]["Conventions"] = "CF-1.11 UGRID-1.0" + nc_meta["attrs"]["Conventions"] = "CF-1.11 UGRID-1.0" ds = self.update_dataset( ds, - netcdf_info=nc_info, + netcdf_meta=nc_meta, mesh=mesh, ) @@ -2374,53 +2379,53 @@ def _write_netcdf(self, mesh=None): chunk_t = False if mesh is None: if ( - "chunk_x" in encode - and encode["chunk_x"] is not None - and "chunk_y" in encode - and encode["chunk_y"] is not None - and "chunk_z" in encode - and encode["chunk_z"] is not None + "chunk_x" in config + and config["chunk_x"] is not None + and "chunk_y" in config + and config["chunk_y"] is not None + and "chunk_z" in config + and config["chunk_z"] is not None ): chunk = True elif mesh.upper() == "LAYERED": - if "chunk_face" in encode and encode["chunk_face"] is not None: + if "chunk_face" in config and config["chunk_face"] is not None: chunk = True - if "chunk_time" in encode and encode["chunk_time"] is not None: + if "chunk_time" in config and config["chunk_time"] is not None: chunk_t = True base_encode = {} - if "deflate" in encode and encode["deflate"] is not None: + if "deflate" in config and config["deflate"] is not None: base_encode["zlib"] = True - base_encode["complevel"] = encode["deflate"] - if "shuffle" in encode and encode["deflate"] is not None: + base_encode["complevel"] = config["deflate"] + if "shuffle" in config and config["deflate"] is not None: base_encode["shuffle"] = True encoding = {} chunk_dims = {'time', 'nmesh_face', 'z', 'y', 'x'} for varname, da in ds.data_vars.items(): dims = ds.data_vars[varname].dims - codes = dict(base_encode) + encode = dict(base_encode) if ( not set(dims).issubset(chunk_dims) or not chunk or not chunk_t ): - encoding[varname] = codes + encoding[varname] = encode continue chunksizes = [] if "time" in dims: - chunksizes.append(encode["chunk_time"]) + chunksizes.append(config["chunk_time"]) if mesh is None: if "z" in dims: - chunksizes.append(encode["chunk_z"]) + chunksizes.append(config["chunk_z"]) if "y" in dims: - chunksizes.append(encode["chunk_y"]) + chunksizes.append(config["chunk_y"]) if "x" in dims: - chunksizes.append(encode["chunk_x"]) + chunksizes.append(config["chunk_x"]) elif mesh.upper() == "LAYERED" and "nmesh_face" in dims: - chunksizes.append(encode["chunk_face"]) + chunksizes.append(config["chunk_face"]) if len(chunksizes) > 0: - codes["chunksizes"] = chunksizes - encoding[varname] = codes + encode["chunksizes"] = chunksizes + encoding[varname] = encode fname = self.name_file.nc_filerecord.get_data()[0][0] diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 67ae944a16..065b337989 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3618,7 +3618,7 @@ def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): return entries - def netcdf_info(self, mesh=None): + def netcdf_meta(self, mesh=None): entries = {} if self.dimensions.get_aux_variables(): @@ -3649,12 +3649,12 @@ def netcdf_info(self, mesh=None): return entries - def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True): + def update_dataset(self, dataset, netcdf_meta=None, mesh=None, update_data=True): from ..discretization.structuredgrid import StructuredGrid - if netcdf_info is None: - nc_info = self.netcdf_info(mesh=mesh) + if netcdf_meta is None: + nc_meta = self.netcdf_meta(mesh=mesh) else: - nc_info = netcdf_info + nc_meta = netcdf_meta modelgrid 
= self.model_or_sim.modelgrid modeltime = self.model_or_sim.modeltime @@ -3670,88 +3670,96 @@ def update_dataset(self, dataset, netcdf_info=None, mesh=None, update_data=True) dimmap["y"] = modelgrid.nrow dimmap["x"] = modelgrid.ncol - def _update_data(nc_info, key, dobj=None, data=None): - if "modflow_iaux" in nc_info[key]["attrs"]: - iaux = nc_info[key]["attrs"]["modflow_iaux"] - 1 + def _update_layered(key, iaux, dobj=None, data=None): + if "layer" in nc_meta[key]["attrs"]: + layer = nc_meta[key]["attrs"]["layer"] - 1 else: - iaux = -1 - if mesh == None: - if dobj.repeating: - if iaux >= 0: - for per in data: - if data[per] is None: - continue - istp = sum(modeltime.nstp[0:per]) - auxdata = data[per][iaux] - dataset[nc_info[key]["varname"]].values[istp, :] = ( - auxdata) - else: - for per in data: - if data[per] is None: - continue - istp = sum(modeltime.nstp[0:per]) - if ( - dobj.structure.data_item_structures[0].numeric_index or - dobj.structure.data_item_structures[0].is_cellid): - dataset[nc_info[key]["varname"]].values[istp, :] = ( - data[per] + 1) - else: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - data[per]) - else: - dataset[nc_info[key]["varname"]].values = data - elif mesh.upper() == "LAYERED": - if "layer" in nc_info[key]["attrs"]: - layer = nc_info[key]["attrs"]["layer"] - 1 - else: - layer = -1 - if dobj.repeating: - if iaux >= 0: - for per in data: - if data[per] is None: - continue - auxdata = data[per][iaux] - istp = sum(modeltime.nstp[0:per]) - if self.structure.read_as_arrays: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - auxdata.flatten()) - elif self.structure.read_array_grid: - uidx = istp + auxdata[layer].size - if modelgrid.nlay > 1: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - auxdata[layer].flatten()) - else: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - auxdata.flatten()) - else: - for per in data: - if data[per] is None: - continue - istp = sum(modeltime.nstp[0:per]) - if layer >= 0 and ( - len(dobj.structure.shape) == 3 or - dobj.structure.shape[0] == 'nodes'): - dataset[nc_info[key]["varname"]].values[istp, :] = ( - data[per][layer].flatten()) - else: - if ( - dobj.structure.data_item_structures[0].numeric_index or - dobj.structure.data_item_structures[0].is_cellid): - dataset[nc_info[key]["varname"]].values[istp, :] = ( - data[per].flatten() + 1) - else: - dataset[nc_info[key]["varname"]].values[istp, :] = ( - data[per].flatten()) + layer = -1 + + if not dobj.repeating: + if layer >= 0 and ( + 'nlay' in dobj.structure.shape or + dobj.structure.shape[0] == 'nodes'): + dataset[nc_meta[key]["varname"]].values = ( + data[layer].flatten()) else: + dataset[nc_meta[key]["varname"]].values = ( + data.flatten()) + return + + if iaux >= 0: + for per in data: + if data[per] is None: + continue + auxdata = data[per][iaux] + istp = sum(modeltime.nstp[0:per]) + if self.structure.read_as_arrays: + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + auxdata.flatten()) + elif self.structure.read_array_grid: + uidx = istp + auxdata[layer].size + if modelgrid.nlay > 1: + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + auxdata[layer].flatten()) + else: + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + auxdata.flatten()) + else: + for per in data: + if data[per] is None: + continue + istp = sum(modeltime.nstp[0:per]) if layer >= 0 and ( - 'nlay' in dobj.structure.shape or + len(dobj.structure.shape) == 3 or dobj.structure.shape[0] == 'nodes'): - dataset[nc_info[key]["varname"]].values = ( - data[layer].flatten()) + 
dataset[nc_meta[key]["varname"]].values[istp, :] = ( + data[per][layer].flatten()) else: - dataset[nc_info[key]["varname"]].values = ( - data.flatten()) + if ( + dobj.structure.data_item_structures[0].numeric_index or + dobj.structure.data_item_structures[0].is_cellid): + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + data[per].flatten() + 1) + else: + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + data[per].flatten()) + def _update_structured(key, iaux, dobj=None, data=None): + if not dobj.repeating: + dataset[nc_meta[key]["varname"]].values = data + return + + if iaux >= 0: + for per in data: + if data[per] is None: + continue + istp = sum(modeltime.nstp[0:per]) + auxdata = data[per][iaux] + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + auxdata) + else: + for per in data: + if data[per] is None: + continue + istp = sum(modeltime.nstp[0:per]) + if ( + dobj.structure.data_item_structures[0].numeric_index or + dobj.structure.data_item_structures[0].is_cellid): + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + data[per] + 1) + else: + dataset[nc_meta[key]["varname"]].values[istp, :] = ( + data[per]) + + def _update_data(key, dobj=None, data=None): + if "modflow_iaux" in nc_meta[key]["attrs"]: + iaux = nc_meta[key]["attrs"]["modflow_iaux"] - 1 + else: + iaux = -1 + if mesh == None: + _update_structured(key, iaux, dobj, data) + elif mesh.upper() == "LAYERED": + _update_layered(key, iaux, dobj, data) def _data_shape(shape): dims_l = [] @@ -3761,41 +3769,41 @@ def _data_shape(shape): return dims_l projection = "projection" in dataset.data_vars + latlon = "lat" in dataset.data_vars and "lon" in dataset.data_vars last_path = '' pitem = None pdata = None - for v in nc_info: - varname = nc_info[v]["varname"] + for v in nc_meta: + varname = nc_meta[v]["varname"] data = np.full( - _data_shape(nc_info[v]["netcdf_shape"]), - nc_info[v]["attrs"]["_FillValue"], - dtype=nc_info[v]["xarray_type"], + _data_shape(nc_meta[v]["netcdf_shape"]), + nc_meta[v]["attrs"]["_FillValue"], + dtype=nc_meta[v]["xarray_type"], ) - var_d = {varname: (nc_info[v]["netcdf_shape"], data)} + var_d = {varname: (nc_meta[v]["netcdf_shape"], data)} dataset = dataset.assign(var_d) - for a in nc_info[v]["attrs"]: - dataset[varname].attrs[a] = nc_info[v]["attrs"][a] + for a in nc_meta[v]["attrs"]: + dataset[varname].attrs[a] = nc_meta[v]["attrs"][a] + dims = dataset[varname].dims if projection: - dims = dataset[varname].dims if "nmesh_face" in dims or "nmesh_node" in dims: dataset[varname].attrs["grid_mapping"] = "projection" dataset[varname].attrs["coordinates"] = "mesh_face_x mesh_face_y" elif mesh is None and len(dims) > 1: - # TODO don't set if lon / lat? 
dataset[varname].attrs["grid_mapping"] = "projection" dataset[varname].attrs["coordinates"] = "x y" + elif latlon and mesh is None and len(dims) > 1: + dataset[varname].attrs["coordinates"] = "lon lat" if update_data: - path = nc_info[v]["attrs"]["modflow_input"] + path = nc_meta[v]["attrs"]["modflow_input"] tag = path.split("/")[2].lower() if path != last_path: - pitem = None - pdata = None pitem = getattr(self, tag) pdata = pitem.get_data() last_path = path - _update_data(nc_info, v, pitem, pdata) + _update_data(v, pitem, pdata) return dataset diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index 5ec4f4be44..90eaaa2da8 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -1675,7 +1675,7 @@ def write_simulation( Writes out the simulation in silent mode (verbosity_level = 0) netcdf : str ASCII package files will be written as configured for NetCDF input. - 'layered', 'structured' and 'nofile' are supported arguments. + 'layered', 'structured' and '' (empty string) are supported arguments. """ sim_data = self.simulation_data From c92deacb6248554c7a21240e27ad8bc60c81a273 Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 26 Sep 2025 16:32:32 -0400 Subject: [PATCH 40/44] enhanced grid support for netcdf --- flopy/discretization/grid.py | 10 ++- flopy/discretization/modeltime.py | 37 ++++++++++ flopy/discretization/structuredgrid.py | 93 ++++++++++++++------------ flopy/discretization/vertexgrid.py | 4 +- flopy/mf6/mfmodel.py | 51 ++++++++++---- 5 files changed, 136 insertions(+), 59 deletions(-) diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index beba293039..30b61f57d9 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1185,12 +1185,14 @@ def read_usgs_model_reference_file(self, reffile="usgs.model.reference"): yul = None if os.path.exists(reffile): with open(reffile) as input: + print(f"Updating grid based on reference: {reffile}") for line in input: if len(line) > 1: if line.strip()[0] != "#": info = line.strip().split("#")[0].split() if len(info) > 1: data = " ".join(info[1:]).strip("'").strip('"') + print(f"Grid update on reference: {info}") if info[0] == "xll": self._xoff = float(data) elif info[0] == "yll": @@ -1201,12 +1203,14 @@ def read_usgs_model_reference_file(self, reffile="usgs.model.reference"): yul = float(data) elif info[0] == "rotation": self._angrot = float(data) - elif info[0] == "epsg": + elif info[0] == "length_units": + self._units = data.lower() + elif info[0].lower() == "epsg": self.epsg = int(data) elif info[0] == "proj4": self.crs = data - elif info[0] == "start_date": - start_datetime = data + else: + print(" ->warn: update not applied.") # model must be rotated first, before setting xoff and yoff # when xul and yul are provided. 
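For reference while reading the grid reader above (and the companion ModelTime reader in the next diff), here is a minimal sketch of a usgs.model.reference file; the values are assumed examples only. Spatial keywords are consumed by `Grid.read_usgs_model_reference_file()`, while `start_date`, `start_time`, and `time_units` are now handled by `ModelTime.read_usgs_model_reference_file()`; any keyword a reader does not recognize falls through to the new "update not applied" warning.

from pathlib import Path

# assumed example content; keyword set taken from the readers in this patch
ref_text = """\
# usgs.model.reference
xll 500000.0
yll 2900000.0
rotation 15.0
length_units meters
epsg 26916
start_date 6/1/2020
start_time 10:00:00
time_units days
"""
Path("usgs.model.reference").write_text(ref_text)

# with hypothetical grid / modeltime objects in scope:
# grid.read_usgs_model_reference_file("usgs.model.reference")
# modeltime.read_usgs_model_reference_file("usgs.model.reference")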
diff --git a/flopy/discretization/modeltime.py b/flopy/discretization/modeltime.py
index f072d11070..e023909595 100644
--- a/flopy/discretization/modeltime.py
+++ b/flopy/discretization/modeltime.py
@@ -1,4 +1,5 @@
 import calendar
+import os
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
 from difflib import SequenceMatcher
@@ -802,3 +803,39 @@ def reverse(self) -> "ModelTime":
             self.start_datetime,
             self.steady_state[::-1] if self.steady_state is not None else None,
         )
+
+    def read_usgs_model_reference_file(self, reffile="usgs.model.reference"):
+        """read temporal reference info from the usgs.model.reference file
+        https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html"""
+        if os.path.exists(reffile):
+            start_date_time = ""
+            with open(reffile) as input:
+                print(f"Updating modeltime based on reference: {reffile}")
+                for line in input:
+                    if len(line) > 1:
+                        if line.strip()[0] != "#":
+                            info = line.strip().split("#")[0].split()
+                            if len(info) > 1:
+                                data = " ".join(info[1:]).strip("'").strip('"')
+                                print(f"ModelTime update on reference: {info}")
+                                if info[0].lower() == "time_units":
+                                    self.time_units = data.lower()
+                                elif info[0] == "start_date":
+                                    if len(start_date_time) > 0:
+                                        start_date_time = f"{data} {start_date_time}"
+                                    else:
+                                        start_date_time = data
+                                elif info[0] == "start_time":
+                                    if len(start_date_time) > 0:
+                                        start_date_time = f"{start_date_time} {data}"
+                                    else:
+                                        start_date_time = data
+                                else:
+                                    print("  ->warn: update not applied.")
+
+            if len(start_date_time) > 0:
+                self.start_datetime = start_date_time
+
+            return True
+        else:
+            return False
diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py
index 96b74d3f1c..075720d6ae 100644
--- a/flopy/discretization/structuredgrid.py
+++ b/flopy/discretization/structuredgrid.py
@@ -1770,6 +1770,46 @@ def get_plottable_layer_array(self, a, layer):
         assert plotarray.shape == required_shape, msg
         return plotarray
 
+    def latlon(self):
+        try:
+            import warnings
+
+            from pyproj import Proj
+
+            epsg = None
+            if self.crs is not None:
+                epsg = self.crs.to_epsg()
+
+            proj = Proj(
+                f"EPSG:{epsg}",
+            )
+
+            lats = []
+            lons = []
+            x_local = []
+            y_local = []
+            for y in self.xycenters[1]:
+                for x in self.xycenters[0]:
+                    x_local.append(x)
+                    y_local.append(y)
+
+            x_global, y_global = self.get_coords(x_local, y_local)
+
+            for i, x in enumerate(x_global):
+                lon, lat = proj(x, y_global[i], inverse=True)
+                lats.append(lat)
+                lons.append(lon)
+
+            return np.array(lats), np.array(lons)
+
+        except Exception as e:
+            warnings.warn(
+                f"Cannot create coordinates from CRS: {e}",
+                UserWarning,
+            )
+
+            return None, None
+
     def dataset(self, modeltime=None, mesh=None, configuration=None):
         """
         modeltime : FloPy ModelTime object
@@ -1803,7 +1843,9 @@ def _layered_mesh_dataset(self, ds, modeltime=None, configuration=None):
         }
         ds = ds.assign(var_d)
         ds["time"].attrs["calendar"] = "standard"
-        ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}"
+        ds["time"].attrs["units"] = (
+            f"{modeltime.time_units} since {modeltime.start_datetime}"
+        )
         ds["time"].attrs["axis"] = "T"
         ds["time"].attrs["standard_name"] = "time"
         ds["time"].attrs["long_name"] = "time"
@@ -1955,7 +1997,9 @@ def _structured_dataset(self, ds, modeltime=None, configuration=None):
 
         ds = ds.assign(var_d)
         ds["time"].attrs["calendar"] = "standard"
-        ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}"
+        ds["time"].attrs["units"] = (
+            f"{modeltime.time_units} since {modeltime.start_datetime}"
+        )
ds["time"].attrs["axis"] = "T" ds["time"].attrs["standard_name"] = "time" ds["time"].attrs["long_name"] = "time" @@ -1980,46 +2024,13 @@ def _structured_dataset(self, ds, modeltime=None, configuration=None): and configuration["longitude"] is not None ) - if latlon_cfg or self.crs is not None: - if latlon_cfg: - lats = configuration["latitude"] - lons = configuration["longitude"] - else: - try: - import warnings - - from pyproj import Proj - - epsg_code = self.crs.to_epsg(min_confidence=90) - proj = Proj( - f"EPSG:{epsg_code}", - ) - - lats = [] - lons = [] - x_local = [] - y_local = [] - for y in self.xycenters[1]: - for x in self.xycenters[0]: - x_local.append(x) - y_local.append(y) - - x_global, y_global = self.get_coords(x_local, y_local) - - for i, x in enumerate(x_global): - lon, lat = proj(x, y_global[i], inverse=True) - lats.append(lat) - lons.append(lon) - - lats = np.array(lats) - lons = np.array(lons) - - except Exception as e: - warnings.warn( - f"Cannot create coordinates from CRS: {e}", - UserWarning, - ) + if latlon_cfg: + lats = configuration["latitude"] + lons = configuration["longitude"] + elif self.crs is not None: + lats, lons = self.latlon() + if lats is not None and lons is not None: # create coordinate vars var_d = { "lat": (["y", "x"], lats.reshape(yc.size, xc.size)), diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index efd82a61bf..80823e64c9 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -631,7 +631,9 @@ def dataset(self, modeltime=None, mesh=None, configuration=None): } ds = ds.assign(var_d) ds["time"].attrs["calendar"] = "standard" - ds["time"].attrs["units"] = f"days since {modeltime.start_datetime}" + ds["time"].attrs["units"] = ( + f"{modeltime.time_units} since {modeltime.start_datetime}" + ) ds["time"].attrs["axis"] = "T" ds["time"].attrs["standard_name"] = "time" ds["time"].attrs["long_name"] = "time" diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 0600c9bf4e..9b5103c584 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -458,6 +458,11 @@ def modelgrid(self): if idomain is None: force_resync = True idomain = self._resolve_idomain(idomain, botm) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] self._modelgrid = StructuredGrid( delc=dis.delc.array, delr=dis.delr.array, @@ -465,7 +470,7 @@ def modelgrid(self): botm=botm, idomain=idomain, lenuni=dis.length_units.array, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -496,6 +501,11 @@ def modelgrid(self): if idomain is None: force_resync = True idomain = self._resolve_idomain(idomain, botm) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] self._modelgrid = VertexGrid( vertices=dis.vertices.array, cell2d=dis.cell2d.array, @@ -503,7 +513,7 @@ def modelgrid(self): botm=botm, idomain=idomain, lenuni=dis.length_units.array, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -525,6 +535,11 @@ def modelgrid(self): idomain = dis.idomain.array if idomain is None: idomain = np.ones(dis.nodes.array, dtype=int) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] if cell2d is None: if ( 
self.simulation.simulation_data.verbosity_level.value @@ -557,7 +572,7 @@ def modelgrid(self): idomain=idomain, lenuni=dis.length_units.array, ncpl=ncpl, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -590,6 +605,11 @@ def modelgrid(self): if idomain is None: force_resync = True idomain = self._resolve_idomain(idomain, botm) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] self._modelgrid = VertexGrid( vertices=dis.vertices.array, cell1d=dis.cell1d.array, @@ -597,7 +617,7 @@ def modelgrid(self): botm=botm, idomain=idomain, lenuni=dis.length_units.array, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -628,6 +648,11 @@ def modelgrid(self): if idomain is None: force_resync = True idomain = self._resolve_idomain(idomain, botm) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] self._modelgrid = StructuredGrid( delc=dis.delc.array, delr=dis.delr.array, @@ -635,7 +660,7 @@ def modelgrid(self): botm=botm, idomain=idomain, lenuni=dis.length_units.array, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -666,6 +691,11 @@ def modelgrid(self): if idomain is None: force_resync = True idomain = self._resolve_idomain(idomain, botm) + crs = self._modelgrid.crs + if crs is None and hasattr(dis, "crs"): + crs = dis.crs.get_data() + if crs is not None: + crs = crs[0][1] self._modelgrid = VertexGrid( vertices=dis.vertices.array, cell2d=dis.cell2d.array, @@ -673,7 +703,7 @@ def modelgrid(self): botm=botm, idomain=idomain, lenuni=dis.length_units.array, - crs=self._modelgrid.crs, + crs=crs, xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, @@ -1361,13 +1391,6 @@ def write( if write_netcdf: # set data storage to write ascii for netcdf pp._set_netcdf_storage() - if ( - pp.package_type.startswith("dis") - and hasattr(pp, "crs") - ): - crs = pp.crs.get_data() - if crs is not None and self.modelgrid.crs is None: - self.modelgrid.crs = crs[0][1] if ( self.simulation_data.verbosity_level.value >= VerbosityLevel.normal.value @@ -2366,7 +2389,7 @@ def _write_netcdf(self, mesh=None): nc_meta["attrs"]["history"] = f"first created {timestamp}" if mesh is None: nc_meta["attrs"]["Conventions"] = "CF-1.11" - elif mesh.upper() is "LAYERED": + elif mesh.upper() == "LAYERED": nc_meta["attrs"]["Conventions"] = "CF-1.11 UGRID-1.0" ds = self.update_dataset( From 319d7b49be2e3247ec8921ff70c0425df224745d Mon Sep 17 00:00:00 2001 From: mjreno Date: Fri, 26 Sep 2025 16:42:29 -0400 Subject: [PATCH 41/44] fix pyproject toml file --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 26a3ae14d7..2185126b4c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,7 +84,7 @@ optional = [ "vtk >=9.4.0", "xmipy", "h5py", - "scikit-learn" + "scikit-learn", "xarray", ] doc = [ From 3c539dc7912539579be6cd9453ec1a5f6a49f54c Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 9 Oct 2025 09:35:39 -0400 Subject: [PATCH 42/44] add notebook example for specific parameters --- .docs/Notebooks/mf6_netcdf01_tutorial.py | 230 +++++++++++++++++++---- autotest/test_mf6.py | 32 ++-- flopy/discretization/grid.py | 2 +- 
 flopy/discretization/modeltime.py        |   2 +-
 flopy/discretization/structuredgrid.py   |   6 +-
 flopy/discretization/vertexgrid.py       |   2 +-
 flopy/mf6/data/mfdataarray.py            |   1 +
 flopy/mf6/data/mfdatastorage.py          |   1 -
 flopy/mf6/mfmodel.py                     |   5 +-
 9 files changed, 223 insertions(+), 58 deletions(-)

diff --git a/.docs/Notebooks/mf6_netcdf01_tutorial.py b/.docs/Notebooks/mf6_netcdf01_tutorial.py
index 9ecc33c9a8..523397af68 100644
--- a/.docs/Notebooks/mf6_netcdf01_tutorial.py
+++ b/.docs/Notebooks/mf6_netcdf01_tutorial.py
@@ -19,11 +19,12 @@
 # ## Write MODFLOW 6 NetCDF simulation
 #
 # This tutorial demonstrates how to generate a MODFLOW 6 NetCDF file
-# from an existing FloPy simulation. Two variations will be shown.
+# from an existing FloPy simulation. Three variations will be shown.
 # In the first, FloPy will generate the file with a modified
 # `write_simulation()` call. The second method is more interactive,
 # providing an opportunity to modify the dataset before it is written
-# to NetCDF.
+# to NetCDF. The third example will show how to update the simulation
+# and dataset for a few specific parameters.
 #
 # Support for generating a MODFLOW 6 NetCDF input without a defined
 # FloPy mf6 model or package instances is briefly discussed at the
@@ -54,7 +55,7 @@
 
 sim_name = "uzf01"
 
-# Check if we are in the repository and define the data path.
+# ### Check if we are in the repository and define the data path.
 
 try:
     root = Path(git.Repo(".", search_parent_directories=True).working_dir)
@@ -88,13 +89,13 @@
         known_hash=fhash,
     )
 
-# ## Create simulation workspace
+# ### Create simulation workspace
 
 # create temporary directories
 temp_dir = TemporaryDirectory()
 workspace = Path(temp_dir.name)
 
-# ## Load and run baseline simulation
+# ### Load and run baseline simulation
 #
 # For the purposes of this tutorial, the specifics of the simulation
 # other than it is a candidate for NetCDF input are not a focus. It
@@ -106,14 +107,14 @@
 # found in the `MODFLOW 6 - Description of Input and Output` (`mf6io.pdf`),
 # also available at the nightly build repository linked above.
 
-# load and run the non-netcdf simulation
+# load and run the ASCII based simulation
 sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
 sim.set_sim_path(workspace)
 sim.write_simulation()
 success, buff = sim.run_simulation(silent=True, report=True)
 assert success, pformat(buff)
 
-# ## Create NetCDF based simulation method 1
+# ## Create NetCDF based simulation method 1 (non-interactive)
 #
 # The most straightforward way to create a NetCDF simulation
 # from the loaded simulation is to provide a `netcdf` argument
@@ -126,7 +127,7 @@
 # this step is not taken, the default name of `{model_name}.input.nc`
 # is used.
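# A brief aside before the code: after this patch series, three values of the
# `netcdf` argument to `write_simulation()` are supported. The summary below
# is a sketch drawn from the `mfsimbase.py` docstring change and the
# Conventions attributes set in `_write_netcdf()`:

# sim.write_simulation(netcdf="structured")  # CF-1.11 structured NetCDF file
# sim.write_simulation(netcdf="layered")     # CF-1.11 + UGRID-1.0 layered mesh
# sim.write_simulation(netcdf="")            # configure ASCII input only; no file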
-# create directory for netcdf sim
+# create directory for the NetCDF sim
 sim.set_sim_path(workspace / "netcdf1")
 # set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf01")
@@ -134,7 +135,7 @@
 # write simulation with structured NetCDF file
 sim.write_simulation(netcdf="structured")
 
-# ## Run MODFLOW 6 simulation with NetCDF input
+# ### Run MODFLOW 6 simulation with NetCDF input
 #
 # The simulation generated by this tutorial should run with the
 # Extended MODFLOW 6 executable, available from the nightly-build
@@ -143,9 +144,9 @@
 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)
 
-# ## Repeat method 1 with layered mesh NetCDF format
+# ### Repeat method 1 with layered mesh NetCDF format
 
-# create directory for netcdf sim
+# create directory for the NetCDF sim
 sim.set_sim_path(workspace / "netcdf2")
 # set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf01")
@@ -153,7 +154,7 @@
 # write simulation with layered mesh NetCDF file
 sim.write_simulation(netcdf="layered")
 
-# ## Run MODFLOW 6 simulation with NetCDF input
+# ### Run MODFLOW 6 simulation with NetCDF input
 #
 # The simulation generated by this tutorial should run with the
 # Extended MODFLOW 6 executable, available from the nightly-build
@@ -162,7 +163,7 @@
 # success, buff = sim.run_simulation(silent=True, report=True)
 # assert success, pformat(buff)
 
-# ## Create NetCDF based simulation method 2
+# ## Create NetCDF based simulation method 2 (interactive)
 #
 # In this method we will set the FloPy `netcdf` argument to `` (empty
 # string) when `write_simulation()` is called. As such, FloPy will
@@ -181,7 +182,7 @@
 # parameter data in the dataset; however an `update_data=False` argument
 # could be passed to the `update_dataset()` call if this was not desired.
 
-# create directory for netcdf sim
+# create directory for the NetCDF sim
 sim.set_sim_path(workspace / "netcdf3")
 # set model name file nc_filerecord attribute to export name
 gwf = sim.get_model("uzf01")
@@ -190,19 +191,19 @@
 # but do not create NetCDF file
 sim.write_simulation(netcdf="")
 
-# ## Show name file with NetCDF input configured
+# ### Show name file with NetCDF input configured
 
 # show name file with NetCDF input configured
 with open(workspace / "netcdf3" / "uzf01.nam", "r") as fh:
     print(fh.read())
 
-# ## Show example package file with NetCDF keywords
+# ### Show example package file with NetCDF keywords
 
 # show example package file with NetCDF input configured
 with open(workspace / "netcdf3" / "uzf01.ic", "r") as fh:
     print(fh.read())
 
-# ## Create dataset
+# ### Create dataset
 #
 # Create the base xarray dataset from the modelgrid object. This
 # will add required dimensions and coordinate variables to the
@@ -212,7 +213,7 @@
 # create the dataset
 ds = gwf.modelgrid.dataset(modeltime=gwf.modeltime)
 
-# ## Access model NetCDF attributes
+# ### Access model NetCDF attributes
 #
 # Internally, FloPy generates and uses NetCDF metadata dictionaries
 # to update datasets. Both model and package objects can generate
@@ -228,7 +229,7 @@
 # `structured` or `layered`). In this step we will access the model
 # NetCDF metadata and use it to update dataset scoped attributes.
 #
-# First, retrieve and store the netcdf metadata dictionary and display
+# First, retrieve and store the NetCDF metadata dictionary and display
 # its contents. Then, in the following step, update the dataset with
 # the model scoped attributes defined in the dictionary.
# @@ -237,7 +238,7 @@ # opportunity to modify the intermediate metadata dictionary. # Examples of this approach (with package objects) are shown below. -# get model netcdf info +# retrieve the model NetCDF meta dictionary nc_meta = gwf.netcdf_meta() pprint(nc_meta) @@ -245,7 +246,7 @@ for a in nc_meta["attrs"]: ds.attrs[a] = nc_meta["attrs"][a] -# ## Update the dataset with supported `DIS` arrays +# ### Update the dataset with supported `DIS` arrays # # First, we will show how package NetCDF parameters can be added to the # dataset without using the NetCDF metadata dictionary. We will use the @@ -257,7 +258,7 @@ dis = gwf.get_package("dis") ds = dis.update_dataset(ds) -# ## Access `NPF` package NetCDF attributes +# ### Access `NPF` package NetCDF attributes # # Access package scoped NetCDF details by storing the dictionary returned # from `netcdf_meta()`. @@ -271,7 +272,7 @@ nc_meta = npf.netcdf_meta() pprint(nc_meta) -# ## Update package NetCDF metadata dictionary and dataset +# ### Update package NetCDF metadata dictionary and dataset # # Here we update the metadata dictionary and then pass it directly to the # `update_dataset()` function which uses it when adding variables to the @@ -288,42 +289,42 @@ nc_meta["k"]["attrs"]["standard_name"] = "soil_hydraulic_conductivity_at_saturation" ds = npf.update_dataset(ds, netcdf_meta=nc_meta) -# ## Show dataset `NPF K` parameter with updates +# ### Show dataset `NPF K` parameter with updates # print dataset npf k variable print(ds["npf_k_updated"]) -# ## Update the dataset with supported `IC` arrays +# ### Update the dataset with supported `IC` arrays # ic ic = gwf.get_package("ic") ds = ic.update_dataset(ds) -# ## Update the dataset with supported `STO` arrays +# ### Update the dataset with supported `STO` arrays # storage sto = gwf.get_package("sto") ds = sto.update_dataset(ds) -# ## Update the dataset with supported `GHBG` arrays +# ### Update the dataset with supported `GHBG` arrays # update dataset with 'GHBG' arrays ghbg = gwf.get_package("ghbg_0") ds = ghbg.update_dataset(ds) -# ## Display generated dataset +# ### Display generated dataset # show the dataset print(ds) -# ## Export generated dataset to NetCDF +# ### Export generated dataset to NetCDF -# write dataset to netcdf +# write dataset to NetCDF ds.to_netcdf( workspace / "netcdf3" / "uzf01.structured.nc", format="NETCDF4", engine="netcdf4" ) -# ## Run MODFLOW 6 simulation with NetCDF input +# ### Run MODFLOW 6 simulation with NetCDF input # # The simulation generated by this tutorial should run with the # Extended MODFLOW 6 executable, available from the nightly-build @@ -332,18 +333,177 @@ # success, buff = sim.run_simulation(silent=True, report=True) # assert success, pformat(buff) -# ## Method 3: DIY with xarray +# ## Support for independently creating and managing the dataset +# +# The NetCDF meta dictionaries are intended in part to support various +# workflows that might be used to generate a MODFLOW 6 input NetCDF file. +# A few more examples are shown here to demonstrate flexibility with +# respect to approach. First, we will use FloPy objects to retrieve +# instance NetCDF meta dictionaries but not to update the dataset. Finally, +# we will show how to retrieve NetCDF meta dictionaries when no FloPy +# objects are at hand. +# +# In particular, the next example shows how to add only select input +# parameters to the dataset. At this time, this sort of approach requires +# manual updating of the MODFLOW 6 ASCII input files, which is also +# shown here. 
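# For orientation before the walkthrough, here is a minimal sketch of the
# per-parameter layout of a package NetCDF meta dictionary. The key names
# come from the `update_dataset()` implementation in `mfpackage.py`; the
# concrete values (variable name, shape, fill value, `modflow_input` path)
# are illustrative assumptions only.

nc_meta_sketch = {
    "k": {
        "varname": "npf_k",  # name of the xarray dataset variable
        "netcdf_shape": ("z", "y", "x"),  # dimension names for the variable
        "xarray_type": "float64",  # dtype used to allocate the fill array
        "attrs": {
            "_FillValue": 3.0e30,  # assumed DNODATA-style fill value
            "modflow_input": "gwf6/npf/k",  # hypothetical path; the third
            # segment is the attribute looked up on the package for data
        },
    },
}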
+
+# ### Reload and write the simulation so that ASCII files can be edited
+
+sim = flopy.mf6.MFSimulation.load(sim_ws=data_path / sim_name)
+gwf = sim.get_model("uzf01")
+gwf.name_file.nc_filerecord = "uzf01.structured.nc"
+sim.set_sim_path(workspace / "netcdf4")
+sim.write_simulation()
+
+# ### Show name file with NetCDF input configured
+
+# As the nc_filerecord attribute was configured, the name file is already
+# updated for NetCDF input
+with open(workspace / "netcdf4" / "uzf01.nam", "r") as fh:
+    print(fh.read())
+
+# ### Create the dataset
+#
+# Although we are accessing the modelgrid through the FloPy model
+# object, this does not need to be the case. Creating the modelgrid
+# object independent of the model provides the same functionality.
+
+modelgrid = gwf.modelgrid
+modeltime = gwf.modeltime
+ds = modelgrid.dataset(modeltime=gwf.modeltime)
+
+# ### Retrieve model NetCDF meta dictionary
+
+nc_meta = gwf.netcdf_meta()
+pprint(nc_meta)
+
+# ### Update dataset with required dataset scoped attributes
+
+for a in nc_meta["attrs"]:
+    ds.attrs[a] = nc_meta["attrs"][a]
+
+# ### Retrieve NPF NetCDF meta dictionary
+
+npf_nc_meta = gwf.get_package("npf").netcdf_meta()
+pprint(npf_nc_meta)
+
+# ### Set actual dimensions in order of npf_nc_meta["k"]["netcdf_shape"]
+
+shape = (modelgrid.nlay, modelgrid.ncol, modelgrid.nrow)
+
+# ### Set varname and default data using NPF NetCDF meta dictionary
+
+varname = npf_nc_meta["k"]["varname"]
+data = np.full(
+    shape,
+    npf_nc_meta["k"]["attrs"]["_FillValue"],
+    dtype=npf_nc_meta["k"]["xarray_type"],
+)
+
+# ### Add parameter with fill values to dataset
+
+var_d = {varname: (npf_nc_meta["k"]["netcdf_shape"], data)}
+ds = ds.assign(var_d)
+
+# ### Update parameter attributes from NetCDF meta dictionary
+
+for a in npf_nc_meta["k"]["attrs"]:
+    ds[varname].attrs[a] = npf_nc_meta["k"]["attrs"][a]
+
+# ### Update parameter data (set identical to ASCII sim)
+
+k = np.ones(shape, dtype=float)
+ds[varname].values = k
+
+# ### Edit the NPF package ASCII file to specify K is in dataset
+
+with open(workspace / "netcdf4" / "uzf01.npf", "w") as f:
+    f.write("BEGIN options\n")
+    f.write("END options\n\n")
+    f.write("BEGIN griddata\n")
+    f.write("  icelltype\n")
+    f.write("    CONSTANT 1\n")
+    f.write("  k netcdf\n")
+    f.write("END griddata\n")
+
+# ### Retrieve GHBG NetCDF meta dictionary
+
+ghbg_nc_meta = gwf.get_package("ghbg").netcdf_meta()
+pprint(ghbg_nc_meta)
+
+# ### Set actual dimensions in order of ghbg_nc_meta["bhead"]["netcdf_shape"]
+
+shape = (sum(modeltime.nstp), modelgrid.nlay, modelgrid.ncol, modelgrid.nrow)
+
+# ### Set varnames and data with help of GHBG NetCDF meta dictionary
+
+varnames = ["bhead", "cond"]
+idata = [1.5, 1.0]
+for i, v in enumerate(varnames):
+    varname = ghbg_nc_meta[v]["varname"]
+    data = np.full(
+        shape,
+        ghbg_nc_meta[v]["attrs"]["_FillValue"],
+        dtype=ghbg_nc_meta[v]["xarray_type"],
+    )
+
+    # add parameters to dataset
+    var_d = {varname: (ghbg_nc_meta[v]["netcdf_shape"], data)}
+    ds = ds.assign(var_d)
+
+    # apply parameter attributes
+    for a in ghbg_nc_meta[v]["attrs"]:
+        ds[varname].attrs[a] = ghbg_nc_meta[v]["attrs"][a]
+
+    # update data to be consistent with ASCII simulation
+    ds[varname].values[0, 99, 0, 0] = idata[i]
+
+# ### Edit the GHBG package ASCII file to specify BHEAD and COND are in the dataset
+
+with open(workspace / "netcdf4" / "uzf01.ghbg", "w") as f:
+    f.write("BEGIN options\n")
+    f.write("  READARRAYGRID\n")
+    f.write("  PRINT_INPUT\n")
+    f.write("  PRINT_FLOWS\n")
+    f.write("  OBS6 FILEIN uzf01.ghb.obs\n")
+    f.write("END options\n\n")
f.write("END options\n\n") + f.write("BEGIN period 1\n") + f.write(" bhead netcdf\n") + f.write(" cond netcdf\n") + f.write("END period 1\n") + +# ### Display the dataset + +print(ds) + +# ### Write the dataset + +ds.to_netcdf( + workspace / "netcdf4" / "uzf01.structured.nc", format="NETCDF4", engine="netcdf4" +) + +# ### Run MODFLOW 6 simulation with NetCDF input +# +# The simulation generated by this tutorial should run with the +# Extended MODFLOW 6 executable, available from the nightly-build +# repository (linked above). + +# success, buff = sim.run_simulation(silent=True, report=True) +# assert success, pformat(buff) + +# ## NetCDF meta dictionary without FloPy objects # # The above method still uses FloPy objects to update the dataset arrays # to values consistent with the state of the objects. The `netcdf_meta` # dictionary is intended to supported creation of the dataset without # an existing simulation defined. The base dataset can be defined with -# `modelgrid` and `modeltime` objects, while the full package netcdf +# `modelgrid` and `modeltime` objects, while the full package NetCDF meta # dictionary can retrieved with a static call to a model or package mf6 # type. The auxiliary input is optional but does show the variables that # would be required if auxiliary variables were defined in the package. -# ## Demonstrate static call on MFPackage (structured dataset): +# ### Demonstrate static call on MFPackage (structured dataset): netcdf_meta = flopy.mf6.mfpackage.MFPackage.netcdf_package( mtype="GWF", @@ -352,7 +512,7 @@ ) pprint(netcdf_meta) -# ## Demonstrate static call on MFPackage (layered dataset): +# ### Demonstrate static call on MFPackage (layered dataset): netcdf_meta = flopy.mf6.mfpackage.MFPackage.netcdf_package( mtype="GWF", ptype="NPF", mesh="LAYERED", auxiliary=["CONCENTRATION"], nlay=2 diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py index b7cb43474b..23e223dc0f 100644 --- a/autotest/test_mf6.py +++ b/autotest/test_mf6.py @@ -2131,7 +2131,7 @@ def test_grid_array(function_tmpdir): assert not wel.has_stress_period_data q_nan = np.where(wel.q.array == DNODATA, np.nan, wel.q.array) - val_q = np.nansum(q_nan, axis=(1, 2, 3, 4)) + val_q = np.nansum(q_nan, axis=(1, 2, 3)) assert val_q[0] == 0.0 assert val_q[1] == 0.25 assert val_q[2] == 0.1 @@ -2189,7 +2189,6 @@ def test_grid_array(function_tmpdir): assert len(wel.aux.get_data()) == 4 assert wel.q.get_data()[0] is None assert wel.q.get_data(0) is None - wel_q_array = wel.q.array assert np.allclose(wel.q.get_data()[1], wel.q.get_data(1)) assert np.allclose(wel.q.get_data()[2], wel.q.get_data(2)) assert np.allclose(wel.q.array[1], wel.q.get_data(1)) @@ -2199,12 +2198,15 @@ def test_grid_array(function_tmpdir): assert np.allclose(wel.aux.array[1][0], wel.aux.get_data(1)[0]) assert np.allclose(wel.aux.array[2][0], wel.aux.get_data(2)[0]) assert not wel.has_stress_period_data + wel_q_array = wel.q.array + print(wel_q_array) q_nan = np.where(wel_q_array == DNODATA, np.nan, wel_q_array) - val_q = np.nansum(q_nan, axis=(1, 2, 3, 4)) + val_q = np.nansum(q_nan, axis=(1, 2, 3)) + print(val_q) assert val_q[0] == 0.0 assert val_q[1] == 0.25 assert val_q[2] == 0.1 - assert val_q[3] == 0.1 + assert val_q[3] == 0.0 val_q_2 = wel.q.get_data() assert val_q_2[0] is None assert val_q_2[1][0, 0, 0] == 0.25 @@ -2416,9 +2418,9 @@ def test_grid_array(function_tmpdir): wel_q_array = wel.q.array assert np.all(wel_q_array[0][0] == 0.0) - assert wel_q_array[1][0][0, 0, 0] == 0.25 - assert wel_q_array[2][0][0, 0, 0] == 0.1 - assert 
wel_q_array[3][0][0, 0, 0] == 0.1 + assert wel_q_array[1][0, 0, 0] == 0.25 + assert wel_q_array[2][0, 0, 0] == 0.1 + assert wel_q_array[3][0, 0, 0] == 0.1 welg_q_per = wel.q.get_data() assert welg_q_per[0] is None assert welg_q_per[1][0, 0, 0] == 0.25 @@ -2437,20 +2439,20 @@ def test_grid_array(function_tmpdir): # first axis is nper, second is naux, then grid drn_elev_array = drn.elev.array - assert drn_elev_array[0][0][0, 0, 0] == 60.0 - assert drn_elev_array[1][0][0, 0, 0] == 60.0 - assert drn_elev_array[2][0][0, 0, 0] == DNODATA - assert drn_elev_array[3][0][0, 0, 0] == 55.0 + assert drn_elev_array[0][0, 0, 0] == 60.0 + assert drn_elev_array[1][0, 0, 0] == 60.0 + assert drn_elev_array[2][0, 0, 0] == DNODATA + assert drn_elev_array[3][0, 0, 0] == 55.0 assert np.allclose(drn_elev_array[0], drn.elev.get_data(0)) assert drn.elev.get_data(1) is None assert np.allclose(drn_elev_array[2], drn.elev.get_data(2)) assert np.allclose(drn_elev_array[3], drn.elev.get_data(3)) ghb_bhead_array = ghb.bhead.array - assert ghb_bhead_array[0][0][0, 1, 1] == 60.0 - assert ghb_bhead_array[1][0][0, 1, 1] == 60.0 - assert ghb_bhead_array[2][0][0, 1, 1] == 60.0 - assert ghb_bhead_array[3][0][0, 1, 1] == 60.0 + assert ghb_bhead_array[0][0, 1, 1] == 60.0 + assert ghb_bhead_array[1][0, 1, 1] == 60.0 + assert ghb_bhead_array[2][0, 1, 1] == 60.0 + assert ghb_bhead_array[3][0, 1, 1] == 60.0 assert np.allclose(ghb_bhead_array[0], ghb.bhead.get_data(0)) assert ghb.bhead.get_data(1) is None assert ghb.bhead.get_data(2) is None diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 30b61f57d9..2092d341fd 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -1210,7 +1210,7 @@ def read_usgs_model_reference_file(self, reffile="usgs.model.reference"): elif info[0] == "proj4": self.crs = data else: - print(" ->warn: update not applied.") + print(" -> warn: update not applied.") # model must be rotated first, before setting xoff and yoff # when xul and yul are provided. 
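The revised assertions above reflect the array shape change for grid-based stress period data: per-period arrays returned by properties like wel.q.array no longer carry a leading auxiliary axis, so each reduction drops one axis. A small self-contained sketch of the new convention, using assumed dimensions, follows.

import numpy as np

DNODATA = 3.0e30  # fill value marking cells without an active boundary
nper, nlay, nrow, ncol = 4, 1, 10, 10  # assumed dimensions

# grid-array stress data now comes back shaped (nper, nlay, nrow, ncol)
q = np.full((nper, nlay, nrow, ncol), DNODATA)
q[1, 0, 0, 0] = 0.25
q[2, 0, 0, 0] = 0.1

q_nan = np.where(q == DNODATA, np.nan, q)
val_q = np.nansum(q_nan, axis=(1, 2, 3))  # one total per stress period
assert val_q[0] == 0.0 and val_q[1] == 0.25 and val_q[2] == 0.1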
diff --git a/flopy/discretization/modeltime.py b/flopy/discretization/modeltime.py index e023909595..ae677042a4 100644 --- a/flopy/discretization/modeltime.py +++ b/flopy/discretization/modeltime.py @@ -831,7 +831,7 @@ def read_usgs_model_reference_file(self, reffile="usgs.model.reference"): else: start_date_time = data else: - print(" ->warn: update not applied.") + print(" -> warn: update not applied.") if len(start_date_time) > 0: self.start_datetime = start_date_time diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index 075720d6ae..1947318507 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1835,7 +1835,7 @@ def dataset(self, modeltime=None, mesh=None, configuration=None): def _layered_mesh_dataset(self, ds, modeltime=None, configuration=None): FILLNA_INT32 = np.int32(-2147483647) FILLNA_DBL = 9.96920996838687e36 - lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + lenunits = {0: "u", 1: "ft", 2: "m", 3: "cm"} # create dataset coordinate vars var_d = { @@ -1958,7 +1958,7 @@ def _layered_mesh_dataset(self, ds, modeltime=None, configuration=None): return ds def _structured_dataset(self, ds, modeltime=None, configuration=None): - lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + lenunits = {0: "u", 1: "ft", 2: "m", 3: "cm"} xc = self.xoffset + self.xycenters[0] yc = self.yoffset + self.xycenters[1] @@ -2024,6 +2024,8 @@ def _structured_dataset(self, ds, modeltime=None, configuration=None): and configuration["longitude"] is not None ) + lats = None + lons = None if latlon_cfg: lats = configuration["latitude"] lons = configuration["longitude"] diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 80823e64c9..42e9c6adc9 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -614,7 +614,7 @@ def dataset(self, modeltime=None, mesh=None, configuration=None): FILLNA_INT32 = np.int32(-2147483647) FILLNA_DBL = 9.96920996838687e36 - lenunits = {0: "m", 1: "ft", 2: "m", 3: "m"} + lenunits = {0: "u", 1: "ft", 2: "m", 3: "cm"} if mesh is None or mesh.upper() != "LAYERED": raise ValueError("Vextex grid only supports layered mesh datasets") diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index e4b2f5fe04..bfd18e9c51 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -735,6 +735,7 @@ def _get_data(self, layer=None, apply_mult=False, **kwargs): and isinstance(self, MFTransientArray) and data is not [] # noqa: F632 and not self._is_grid_aux() + and not "nodes" in self.structure.shape ): data = np.expand_dims(data, 0) return data diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 477530b7bc..a991cfc910 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2411,7 +2411,6 @@ def _build_full_data(self, apply_multiplier=False): layers_to_process = [0] else: layers_to_process = self.layer_storage.indexes() - layers_to_process = self.layer_storage.indexes() for layer in layers_to_process: if ( self.layer_storage[layer].factor is not None diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 9b5103c584..846d07db69 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1402,8 +1402,9 @@ def write( pp._set_netcdf_storage(reset=True) # write netcdf file - if write_netcdf and netcdf != "": - self._write_netcdf(mesh=netcdf) + if write_netcdf: + if netcdf != "": + self._write_netcdf(mesh=netcdf) if nc_fname is not 
None: self.name_file.nc_filerecord = None From 45a68457fb93810f38ce9947a1cf70dc37561dd1 Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 9 Oct 2025 10:12:09 -0400 Subject: [PATCH 43/44] fix merge --- flopy/mf6/data/mfstructure.py | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 4dad360318..96e9819a18 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -1725,9 +1725,6 @@ class MFSimulationStructure: get_data_structure(path : string) Returns a data structure if it exists, otherwise returns None. Data structure type returned is based on the tuple/list "path" - tag_read_array - Searches through all packages and tags any packages with a name that - indicates they are the READASARRAYS or READARRAYGRID version of a package. """ def __init__(self): @@ -1825,29 +1822,26 @@ def get_data_structure(self, path): else: return None - def tag_read_array(self): - for key, package_struct in self.package_struct_objs.items(): + def _tag_read_array(self): + for pkg_spec in self.pkg_spec.values(): if ( - package_struct.get_data_structure(('options', 'readasarrays')) + pkg_spec.get_data_structure(('options', 'readasarrays')) ): - package_struct.read_as_arrays = True + pkg_spec.read_as_arrays = True elif ( - package_struct.get_data_structure(('options', 'readarraygrid')) + pkg_spec.get_data_structure(('options', 'readarraygrid')) ): - package_struct.read_array_grid = True - for model_key, model_struct in self.model_struct_objs.items(): - for ( - key, - package_struct, - ) in model_struct.package_struct_objs.items(): + pkg_spec.read_array_grid = True + for mdl_spec in self.mdl_spec.values(): + for pkg_spec in mdl_spec.pkg_spec.values(): if ( - package_struct.get_data_structure(('options', 'readasarrays')) + pkg_spec.get_data_structure(('options', 'readasarrays')) ): - package_struct.read_as_arrays = True + pkg_spec.read_as_arrays = True elif ( - package_struct.get_data_structure(('options', 'readarraygrid')) + pkg_spec.get_data_structure(('options', 'readarraygrid')) ): - package_struct.read_array_grid = True + pkg_spec.read_array_grid = True class MFStructure: @@ -1890,5 +1884,5 @@ def _load(self): package.package_abbr ] = entry[1:] # process each package - self.sim_struct.process_dfn(DfnPackage(package)) - self.sim_struct.tag_read_array() + self.sim_spec.register(Dfn(package)) + self.sim_spec._tag_read_array() From 5ea9e0abeb9a74cb790ecefb482c6bd5f89c417f Mon Sep 17 00:00:00 2001 From: mjreno Date: Thu, 9 Oct 2025 12:48:12 -0400 Subject: [PATCH 44/44] more merge fixes --- flopy/mf6/mfpackage.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 065b337989..266872df41 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3582,26 +3582,29 @@ def _add_entry(tagname, iaux=None, layer=None): @staticmethod def netcdf_package(mtype, ptype, auxiliary=None, mesh=None, nlay=1): - from .data.mfstructure import DfnPackage, MFSimulationStructure + from .data.mfstructure import Dfn, MFSimulationStructure entries = {} - sim_struct = MFSimulationStructure() + sim_spec = MFSimulationStructure() for package in MFPackage.__subclasses__(): - sim_struct.process_dfn(DfnPackage(package)) - p = DfnPackage(package) + p = Dfn(package) c, sc = p.dfn_file_name.split(".")[0].split("-") if c == mtype.lower() and sc == ptype.lower(): - sim_struct.add_package(p, model_file=False) + 
sim_spec.register(Dfn(package)) break - if ptype.lower() in sim_struct.package_struct_objs: - pso = sim_struct.package_struct_objs[ptype.lower()] - if pso.multi_package_support: + pkg_spec = None + if f"{mtype.lower()}6" in sim_spec.mdl_spec: + mdl_spec = sim_spec.mdl_spec[f"{mtype.lower()}6"] + if f"{ptype.lower()}" in mdl_spec.pkg_spec: + pkg_spec = mdl_spec.pkg_spec[f"{ptype.lower()}"] + if pkg_spec is not None: + if pkg_spec.multi_package_support: pname = f"<{ptype}name>" else: pname = ptype - for key, block in pso.blocks.items(): + for key, block in pkg_spec.blocks.items(): if key != "griddata" and key != "period": continue for d in block.data_structures: