Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 57 additions & 1 deletion nlmod/dims/grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -309,7 +309,7 @@ def modelgrid_to_ds(mg=None, grbfile=None):
Dataset containing grid information
"""
if mg is None and grbfile is not None:
mg = flopy.utils.MfGrdFile(grbfile).modelgrid
mg = flopy.mf6.utils.MfGrdFile(grbfile).modelgrid
elif mg is None and grbfile is None:
raise ValueError("Either 'mg' or 'grbfile' should be specified!")
if mg.grid_type == "structured":
Expand Down Expand Up @@ -2124,3 +2124,59 @@ def get_affine(ds, sx=None, sy=None):
xoff = attrs["extent"][0]
yoff = attrs["extent"][3]
return Affine.translation(xoff, yoff) * Affine.scale(sx, sy)


def _shoelace_formula(x, y):
"""Calculate the area of a polygon using the shoelace formula.

Parameters
----------
x : np.ndarray
x-coordinates of the polygon.
y : np.ndarray
y-coordinates of the polygon.

Returns
-------
area : float
area of the polygon.
"""
x = x - np.min(x)
y = y - np.min(y)
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))


def get_area(ds):
    """Calculate the area of each cell in the model grid.

    Parameters
    ----------
    ds : xr.Dataset
        model dataset.

    Returns
    -------
    area : xr.DataArray
        area of each cell, with dims ("y", "x") for a structured grid or
        ("icell2d",) for a vertex grid.

    Raises
    ------
    ValueError
        if ds.gridtype is neither "structured" nor "vertex".
    """
    if ds.gridtype == "structured":
        # cell area is row height (delc) times column width (delr)
        area = xr.DataArray(
            np.outer(get_delc(ds), get_delr(ds)),
            dims=("y", "x"),
            coords={"y": ds["y"], "x": ds["x"]},
        )
    elif ds.gridtype == "vertex":
        # hoist the coordinate arrays out of the loop: plain numpy fancy
        # indexing is much faster than a DataArray.isel call per cell
        xv = ds["xv"].values
        yv = ds["yv"].values
        icvert = ds["icvert"].values
        values = np.zeros(ds["icell2d"].size)
        for icell2d in range(values.size):
            # NOTE(review): every vertex slot of icvert is used for every
            # cell — confirm icvert carries no fill/padding values for cells
            # with fewer vertices than the maximum
            cell_vertices = icvert[icell2d]
            values[icell2d] = _shoelace_formula(xv[cell_vertices], yv[cell_vertices])
        area = xr.DataArray(
            values,
            dims=("icell2d"),
            coords={"icell2d": ds["icell2d"]},
        )
    else:
        raise ValueError("function only supports structured or vertex gridtypes")
    return area
58 changes: 35 additions & 23 deletions nlmod/dims/time.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import cftime
import datetime as dt
import logging
import warnings

import cftime
import numpy as np
import pandas as pd
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime, OutOfBoundsTimedelta
import xarray as xr
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime, OutOfBoundsTimedelta
from xarray import IndexVariable

from .attributes_encodings import dim_attrs
Expand Down Expand Up @@ -155,7 +155,7 @@ def set_ds_time_deprecated(


def _pd_timestamp_to_cftime(time_pd):
"""convert a pandas timestamp into a cftime stamp
"""Convert a pandas timestamp into a cftime stamp

Parameters
----------
Expand All @@ -166,7 +166,6 @@ def _pd_timestamp_to_cftime(time_pd):
-------
cftime.datetime or list of cftime.datetime
"""

if hasattr(time_pd, "__iter__"):
return [_pd_timestamp_to_cftime(tpd) for tpd in time_pd]
else:
Expand Down Expand Up @@ -247,7 +246,9 @@ def set_ds_time(
# parse start
if isinstance(start, (int, np.integer, float)):
if isinstance(time[0], (int, np.integer, float, str)):
raise TypeError("Make sure 'start' or 'time' argument is a valid TimeStamp")
raise TypeError(
"Make sure 'start' or 'time' argument is a valid TimeStamp"
)
start = time[0] - pd.to_timedelta(start, "D")
elif isinstance(start, str):
start = pd.Timestamp(start)
Expand Down Expand Up @@ -278,13 +279,14 @@ def set_ds_time(
elif isinstance(time[0], cftime.datetime):
start = _pd_timestamp_to_cftime(start)
else:
msg = (
f"Cannot process 'time' argument. Datatype -> {type(time)} not understood."
)
msg = f"Cannot process 'time' argument. Datatype -> {type(time)} not understood."
raise TypeError(msg)
except (OutOfBoundsDatetime, OutOfBoundsTimedelta) as e:
msg = "cannot convert 'start' and 'time' to pandas datetime, use cftime types for 'start' and 'time'"
raise type(e)(msg)
msg = (
"cannot convert 'start' and 'time' to pandas datetime, use cftime "
"types for 'start' and 'time'"
)
raise type(e)(msg) from e

if time[0] <= start:
msg = (
Expand Down Expand Up @@ -355,7 +357,6 @@ def set_ds_time_numeric(
ds : xarray.Dataset
model dataset with added time coordinate
"""

if time is None and perlen is None:
raise (ValueError("Please specify either time or perlen in set_ds_time"))
elif perlen is not None:
Expand Down Expand Up @@ -393,7 +394,7 @@ def set_ds_time_numeric(


def set_time_variables(ds, start, time, steady, steady_start, time_units, nstp, tsmult):
"""add data variables: steady, nstp and tsmult, set attributes: start, time_units
"""Add data variables: steady, nstp and tsmult, set attributes: start, time_units

Parameters
----------
Expand Down Expand Up @@ -735,13 +736,13 @@ def dataframe_to_flopy_timeseries(
)


def ds_time_to_pandas_index(ds, include_start=True):
def ds_time_to_pandas_index(da_or_ds, include_start=True):
"""Convert xarray time index to pandas datetime index.

Parameters
----------
ds : xarray.Dataset
dataset with time index
da_or_ds : xarray.Dataset or xarray.DataArray
dataset with time index or time data array
include_start : bool, optional
include the start time in the index, by default True

Expand All @@ -750,13 +751,24 @@ def ds_time_to_pandas_index(ds, include_start=True):
pd.DatetimeIndex
pandas datetime index
"""
if isinstance(da_or_ds, xr.Dataset):
da = da_or_ds["time"]
elif isinstance(da_or_ds, xr.DataArray):
da = da_or_ds
else:
raise ValueError(
"Either pass model dataset or time dataarray (e.g. `ds.time`)."
)

if include_start:
if ds.time.dtype.kind == "M": # "M" is a numpy datetime-type
return ds.time.to_index().insert(0, pd.Timestamp(ds.time.start))
elif ds.time.dtype.kind == "O":
start = _pd_timestamp_to_cftime(pd.Timestamp(ds.time.start))
return ds.time.to_index().insert(0, start)
elif ds.time.dtype.kind in ["i", "f"]:
return ds.time.to_index().insert(0, 0)
if da.dtype.kind == "M": # "M" is a numpy datetime-type
return da.to_index().insert(0, pd.Timestamp(da.start))
elif da.dtype.kind == "O":
start = _pd_timestamp_to_cftime(pd.Timestamp(da.start))
return da.to_index().insert(0, start)
elif da.dtype.kind in ["i", "f"]:
return da.to_index().insert(0, 0)
else:
raise TypeError("Unknown time dtype")
else:
return ds.time.to_index()
return da.to_index()
2 changes: 1 addition & 1 deletion nlmod/plot/plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def modelgrid(ds, ax=None, **kwargs):
if ax is None:
_, ax = plt.subplots(figsize=(10, 10))
ax.set_aspect("auto")
modelgrid = modelgrid_from_ds(ds)
modelgrid = modelgrid_from_ds(ds, rotated=kwargs.get("rotated", False))
extent = None if ax.get_autoscale_on() else ax.axis()
modelgrid.plot(ax=ax, **kwargs)
if extent is not None:
Expand Down
8 changes: 6 additions & 2 deletions nlmod/plot/plotutil.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,10 +70,14 @@ def add_background_map(ax, crs=EPSG_28992, map_provider="nlmaps.standaard", **kw
"""
import contextily as ctx

if isinstance(crs, (str, int)):
if isinstance(crs, str):
import pyproj

proj = pyproj.Proj(crs)
proj = pyproj.CRS.from_string(crs)
elif isinstance(crs, int):
import pyproj

proj = pyproj.CRS.from_epsg(crs)

providers = _list_contextily_providers()
ctx.add_basemap(ax, source=providers[map_provider], crs=proj.srs, **kwargs)
Expand Down
28 changes: 14 additions & 14 deletions nlmod/read/geotop.py
Original file line number Diff line number Diff line change
Expand Up @@ -451,55 +451,55 @@ def add_kh_and_kv(
strat_un = np.unique(strat[~np.isnan(strat)])
kh_ar = np.full(strat.shape, 0.0)
kv_ar = np.full(strat.shape, 0.0)
probality_total = np.full(strat.shape, 0.0)
probability_total = np.full(strat.shape, 0.0)
for ilithok in df["lithok"].unique():
if ilithok == 0:
# there are no probabilities defined for lithoclass 'antropogeen'
continue
probality = gt[f"kans_{ilithok}"].values
probability = gt[f"kans_{ilithok}"].values
if "strat" in df:
khi, kvi = _handle_nans_in_stochastic_approach(
np.nan, np.nan, kh_method, kv_method
)
khi = np.full(strat.shape, khi)
kvi = np.full(strat.shape, kvi)
for istrat in strat_un:
mask = (strat == istrat) & (probality > 0)
mask = (strat == istrat) & (probability > 0)
if not mask.any():
continue
kh_sel, kv_sel = _get_kh_kv_from_df(
df, ilithok, istrat, anisotropy=anisotropy, mask=mask
)
if np.isnan(kh_sel):
probality[mask] = 0.0
probability[mask] = 0.0
kh_sel, kv_sel = _handle_nans_in_stochastic_approach(
kh_sel, kv_sel, kh_method, kv_method
)
khi[mask], kvi[mask] = kh_sel, kv_sel
else:
khi, kvi = _get_kh_kv_from_df(df, ilithok, anisotropy=anisotropy)
if np.isnan(khi):
probality[:] = 0.0
probability[:] = 0.0
khi, kvi = _handle_nans_in_stochastic_approach(
khi, kvi, kh_method, kv_method
)
if kh_method == "arithmetic_mean":
kh_ar = kh_ar + probality * khi
kh_ar = kh_ar + probability * khi
else:
kh_ar = kh_ar + (probality / khi)
kh_ar = kh_ar + (probability / khi)
if kv_method == "arithmetic_mean":
kv_ar = kv_ar + probality * kvi
kv_ar = kv_ar + probability * kvi
else:
kv_ar = kv_ar + (probality / kvi)
probality_total += probality
kv_ar = kv_ar + (probability / kvi)
probability_total += probability
if kh_method == "arithmetic_mean":
kh_ar = kh_ar / probality_total
kh_ar = kh_ar / probability_total
else:
kh_ar = probality_total / kh_ar
kh_ar = probability_total / kh_ar
if kv_method == "arithmetic_mean":
kv_ar = kv_ar / probality_total
kv_ar = kv_ar / probability_total
else:
kv_ar = probality_total / kv_ar
kv_ar = probability_total / kv_ar
else:
raise (ValueError(f"Unsupported value for stochastic: '{stochastic}'"))

Expand Down