diff --git a/autotest/test_starfit_flow_node.py b/autotest/test_starfit_flow_node.py index 799893df..bf7fc17a 100644 --- a/autotest/test_starfit_flow_node.py +++ b/autotest/test_starfit_flow_node.py @@ -13,7 +13,10 @@ from pywatershed.parameters import Parameters, StarfitParameters # NB: -# Here we are comparing a daily starfit against an hourly StarfitNode. +# Here we are comparing a daily offline starfit against an hourly +# StarfitNode. The reference output is the mean value from offline runs run +# from 1995-2001 in the file +# ../test_data/starfit/starfit_mean_output_1995-2001.nc # We only advance the hourly StarfitNode one substepper day. It's # resulting flow rates are identical but the change in storage is 1/24 # of the daily value, so we check this. We have to track previous storage @@ -29,12 +32,12 @@ # & (parameters_ds.end_time >= np.datetime64("2001-12-31 00:00:00")) # fmt: off starfit_inds_test = [ - 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, + 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 15, 16, 18, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 36, 37, 38, 40, 43, 44, 47, 48, 49, 51, 52, 53, 55, 56, 59, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 86, 87, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 122, 123, 130, 134, 137, 139, 140, 141, 145, 148, 149, 152, 154, 155, 156, 157, 158, 159, 160, 161, 162, 164, 165, 166 diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 7725a26e..1a64e8e4 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -35,7 +35,7 @@ New Features (DFW) routing from PRMS NHM input files and a few simple assumptions. The lateral (to-channel) fluxes from a PRMS are used as time varying boundary conditions. A new notebook runs the Delaware River Basin using MF6 DFW: - `examples/mmr_to_mf6_dfw.ipynb `__. + `examples/07_mmr_to_mf6_chf_dfw.ipynb `__. 
(:pull:`290`) By `James McCreight `_. - The depression storage option for PRMSRunoff is implemented and tested. (:pull:`279`) By `James McCreight `_. @@ -96,6 +96,8 @@ Internal changes PRMSGroundwater: 1.0e-8, PRMSGroundwaterNoDprst: 1.0e-8, PRMSChannel: 5.0e-7) for all test domains. (:pull:`288`) By `James McCreight `_. +- Migration to Numpy 2.0+. + (:pull:`310`) By `James McCreight `_. .. _whats-new.1.1.0: diff --git a/examples/02_prms_legacy_models.ipynb b/examples/02_prms_legacy_models.ipynb index b92b51b9..a0cbcc87 100644 --- a/examples/02_prms_legacy_models.ipynb +++ b/examples/02_prms_legacy_models.ipynb @@ -195,7 +195,9 @@ "metadata": {}, "outputs": [], "source": [ - "control = pws.Control.load_prms(domain_dir / \"nhm.control\", warn_unused_options=False)\n", + "control = pws.Control.load_prms(\n", + " domain_dir / \"nhm.control\", warn_unused_options=False\n", + ")\n", "\n", "control" ] diff --git a/examples/06_flow_graph_starfit.ipynb b/examples/06_flow_graph_starfit.ipynb index cfee95ac..b44385fc 100644 --- a/examples/06_flow_graph_starfit.ipynb +++ b/examples/06_flow_graph_starfit.ipynb @@ -8,13 +8,15 @@ "# PRMSChannel FlowGraph with a STARFIT Reservoir: Big Sandy Reservoir\n", "\n", "This notebook demonstrates the capabilities of the `FlowGraph` class and its associated classes\n", - "`FlowNode` and `FlowNodeMaker` in a real-world example. This example starts from an existing\n", - "flow graph which is in `PRMSChannel` and adds in a single new node to represent a reservoir\n", - "within the `PRMSChannel` simulation. The `FlowGraph` is the class which is able to take different\n", - "flow methods and combine them in user-specified ways. In this case we combine nodes of class\n", - "`PRMSChannelFlowNode` with one node of class `StarfitFlowNode`. \n", + "`FlowNode` and `FlowNodeMaker` in a real-world example. 
This example starts from an existing graph of \n", + "flow, embedded in `PRMSChannel` and its parameters, and adds in a single new node to represent a reservoir\n", + "within the `PRMSChannel` simulation. \n", "\n", - "Please see these links to the documentation for more details on \n", + "The `FlowGraph` is the class which is able to take different flow methods and combine them in \n", + "user-specified ways. In this case we combine nodes of class `PRMSChannelFlowNode` (a re-expression\n", + "of `PRMSChannel` as a `FlowNode` to work with `FlowGraph`) with one node of class `StarfitFlowNode`. \n", + "\n", + "Please see these links to the documentation for more details on each:\n", "[`FlowGraph`](https://pywatershed.readthedocs.io/en/latest/api/generated/pywatershed.FlowGraph.html), \n", "[`StarfitFlowNode`](https://pywatershed.readthedocs.io/en/latest/api/generated/pywatershed.StarfitFlowNode.html), and \n", "[`PRMSChannelFlowNode`](https://pywatershed.readthedocs.io/en/latest/api/generated/pywatershed.PRMSChannelFlowNode.html)." 
@@ -85,27 +87,16 @@ { "cell_type": "code", "execution_count": null, - "id": "92ec574d-111e-4637-8389-9bb777a7f4db", - "metadata": {}, - "outputs": [], - "source": [ - "pkg_root = pws.constants.__pywatershed_root__\n", - "big_sandy_param_file = pkg_root / \"data/big_sandy_starfit_parameters.nc\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "efe20ee6-f2ad-4627-934f-810e52c1d516", + "id": "c60dd131-c4d9-49cf-a376-556db6c23935", "metadata": {}, "outputs": [], "source": [ - "sf_params = pws.Parameters.from_netcdf(big_sandy_param_file, use_xr=True)\n", - "\n", - "# This is how to get parameters for reservoirs in the ISTARF-CONUS database.\n", - "# The GRanD file must be downloaded manually from\n", + "# Below in this cell is how one would get parameters for reservoirs in the ISTARF-CONUS database.\n", + "# We use pre-canned parameters and do not do this here because the GRanD file\n", + "# must be downloaded manually from\n", "# https://ln.sync.com/dl/bd47eb6b0/anhxaikr-62pmrgtq-k44xf84f-pyz4atkm/view/default/447819520013\n", - "# and unpacked and placed in the location below.\n", + "# unpacked and placed in the location below, which can not be automated\n", + "# for testing this notebook.\n", "# param_src_dir = nb_output_dir / \"param_sources\"\n", "# param_src_dir.mkdir(exist_ok=True)\n", "# grand_file = param_src_dir / \"GRanD_Version_1_3/GRanD_reservoirs_v1_3.dbf\"\n", @@ -116,6 +107,18 @@ "# )" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "92ec574d-111e-4637-8389-9bb777a7f4db", + "metadata": {}, + "outputs": [], + "source": [ + "pkg_root = pws.constants.__pywatershed_root__\n", + "big_sandy_param_file = pkg_root / \"data/big_sandy_starfit_parameters.nc\"\n", + "sf_params = pws.Parameters.from_netcdf(big_sandy_param_file, use_xr=True)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -169,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - "DomainPlot(\n", + "domain_plot = DomainPlot(\n", " 
hru_shp_file=shp_file_hru,\n", "    segment_shp_file=shp_file_seg,\n", "    hru_parameters=domain_dir / \"parameters_dis_hru.nc\",\n", @@ -198,7 +201,7 @@ "id": "1170d616-a760-455d-a88f-19ffc2205121", "metadata": {}, "source": [ - "From the above, by mousing over the segments we can see the reservoir should be inserted above nhm_seg 44426 and below nhm_segs 44434 and 44435. \n", + "From the above, by mousing over the segments we can see the reservoir would be inserted above nhm_seg 44426 and below nhm_segs 44434 and 44435. \n", "\n", "For more context, zooming out shows the full Flaming Gorge Domain on the Green River. The openstreetmap layers shows that Big Sandy Dike is located near Farson, WY. In the EsriSatellite layer, we observe this is a very dry, high plains region with farming downstream of the Big Sandy and Eden reservoirs around Farson. We can also see that the reservoir is fed by snowpack and seasonal runoff from the high Wind River Range to the Northeast. The photo of Arrowhead Lake below (taken by the author in August 2023) looks southeast at Temple Mountain, across the furthest upstream HRU of the Big Sandy Dike. \n", "![Arrowhead Lake, August 2023](static/arrowhead_lake.jpg)" @@ -210,8 +213,7 @@ "metadata": {}, "source": [ "## NHM Run on Flaming Gorge Domain: NO Big Sandy\n", - "The NHM does not represent any reservoirs. From above, we'll assume the outflows of Big Sandy are on segment 44426. We'll see how the NHM represents flow at Big Sandy.\n", - "We can run pywatershed using the \"legacy instantation\" as described in Notebook 02." + "The NHM does not represent any reservoirs. From the above plot, we'll assume the outflows of Big Sandy would be at segment 44426. Let's see how the NHM represents flow at this location, without any reservoir representation. We can run pywatershed using the \"legacy instantiation\" as described in Notebook 02." 
] }, { @@ -332,7 +334,7 @@ "source": [ "## FlowGraph in Model: NHM with a STARFIT representation of Big Sandy\n", "\n", - "Because FlowGraph is not part of PRMS, we cant run FlowGraph with PRMS/NHM using the legacy instantiation (eg. notebook 02). We have to use a multi-process model, the pywatershed way (e.g. notebook 01). The next three cells build the multi-process model above the FlowGraph. We then use a helper function to insert the STARFIT resevoir into the PRMS/NHM Muskingum-Mann channel routing and append it to our multi-process model." + "Because FlowGraph is not part of PRMS, we can't run FlowGraph with PRMS/NHM using the legacy instantiation (e.g. notebook 02). We have to use a multi-process model, and set it up \"the pywatershed way\" (as described in notebook 01). The next three cells build the multi-process model which flows into the FlowGraph. We then use a helper function to insert the STARFIT reservoir into the `FlowNode` representation of the PRMS/NHM Muskingum-Mann channel routing and we append this `FlowGraph` to our multi-process model." ] }, { @@ -431,9 +433,9 @@ "id": "f229c613-56fe-49c0-b211-f0f047eddc57", "metadata": {}, "source": [ - "Now we have a model dictionary describing everything above the `PRMSChannel` (Musking-Mann). We have a very nice helper function, `prms_channel_flow_graph_to_model_dict`, we can use to add a `FlowGraph` to this model. The function takes the existing `model_dict`, the `PRMSChannel` information, plus additional user-supplied information, to construct a `FlowGraph` with a new `StarfitFlowNode` inserted in the `PRMSChannel` at the location above nhm segment 44426 (and below 44434 and 44435) to represent Big Sandy. This `FlowGraph` instance is added to the `model_dict` by name \"prms_channel_flow_graph\". \n", + "Now we have a model dictionary describing all the processes which flow into the `PRMSChannel` (Musking-Mann). 
We have a very nice helper function, `prms_channel_flow_graph_to_model_dict`, we can use to add a `FlowGraph` to this model. The function takes the existing `model_dict`, the `PRMSChannel` data, plus additional user-supplied information, to construct a `FlowGraph` with new nodes inserted into the `PRMSChannel`. In this case we'll add a single new node to the `PRMSChannel`, this will be a `StarfitFlowNode` inserted at the location above nhm segment 44426 (and below 44434 and 44435) to represent the Big Sandy dike. This `FlowGraph` instance is finally added to the `model_dict` with the name \"prms_channel_flow_graph\". \n", "\n", - "The function will also add an `InflowExchange` instance to the `model_dict` named \"inflow_exchange\" which will manage getting the fluxes from PRMS to the FlowGraph. Zero lateral flows are supplied to the StarfitNode for Big Sandy in this case (though we could do otherwise)." + "We'll see that the `prms_channel_flow_graph_to_model_dict` helper function will also add an `InflowExchange` instance to the `model_dict` named \"inflow_exchange\". This `InflowExchange` will manage getting the fluxes from the other processes into the FlowGraph. Zero lateral flows are supplied to the StarfitNode for Big Sandy in this case (though we could do otherwise)." 
] }, { @@ -554,80 +556,19 @@ ")" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "1c20b0dc-0402-45e2-88e5-5aa22c994726", - "metadata": {}, - "outputs": [], - "source": [ - "import inspect\n", - "\n", - "max_nor = pws.hydrology.starfit.max_nor\n", - "min_nor = pws.hydrology.starfit.min_nor\n", - "\n", - "\n", - "def get_param_names(func):\n", - " return list(inspect.signature(func).parameters.keys())\n", - "\n", - "\n", - "max_nor_names = get_param_names(max_nor)\n", - "min_nor_names = get_param_names(min_nor)\n", - "\n", - "max_nor_params = {\n", - " kk: vv for kk, vv in sf_params.parameters.items() if kk in max_nor_names\n", - "}\n", - "min_nor_params = {\n", - " kk: vv for kk, vv in sf_params.parameters.items() if kk in min_nor_names\n", - "}\n", - "\n", - "omega = 1.0 / 52.0\n", - "max_nor_params[\"omega\"] = omega\n", - "min_nor_params[\"omega\"] = omega\n", - "\n", - "datetime_epiweek = pws.utils.time_utils.datetime_epiweek\n", - "epiweeks = [datetime_epiweek(tt) for tt in outflow.time.values]\n", - "\n", - "max_nor_ts = np.zeros_like(epiweeks, dtype=np.float64) * np.nan\n", - "min_nor_ts = np.zeros_like(epiweeks, dtype=np.float64) * np.nan\n", - "\n", - "for ii, ee in enumerate(epiweeks):\n", - " max_nor_ts[ii] = max_nor(**max_nor_params, epiweek=ee)[0]\n", - " min_nor_ts[ii] = min_nor(**min_nor_params, epiweek=ee)[0]\n", - "\n", - "capacity = sf_params.parameters[\"GRanD_CAP_MCM\"] * pws.constants.cm_to_cf\n", - "max_nor = outflow.copy().rename(\"max_nor\")\n", - "min_nor = outflow.copy().rename(\"min_nor\")\n", - "\n", - "max_nor[:] = max_nor_ts * capacity / 100.0\n", - "min_nor[:] = min_nor_ts * capacity / 100.0\n", - "\n", - "capacity = outflow.copy().rename(\"capacity\") * 0 + capacity\n", - "\n", - "xr.merge([storage_nodes, max_nor, min_nor, capacity]).rename(\n", - " {\n", - " \"node_storages\": \"Big Sandy Storage\",\n", - " \"max_nor\": \"Max NOR\",\n", - " \"min_nor\": \"Min NOR\",\n", - " }\n", - ").hvplot(\n", - " 
width=plot_width,\n", - " height=plot_height,\n", - " ylabel=\"storage (million cubic feet)\",\n", - ")" - ] - }, { "cell_type": "markdown", "id": "14432433-2385-4c21-992a-a05fc9524f11", "metadata": {}, "source": [ "## FlowGraph as a post-process: Drive FlowGraph with STARFIT representation of Big Sandy and Pass-Through using NHM output files\n", - "Above we ran the full NHM with a `StarfitNode` at Big Sandy. But pywatershed is flexible and in the NHM configuration no two process representations are two-way coupled. See [figure in the extended release notes](https://ec-usgs.github.io/pywatershed/assets/img/pywatershed_NHM_model_graph.png). (Note that some PRMS configurations in pywatershed can be two-way coupled between Runoff and Soilzone and/or Canopy and Snow.) In this case, the `PRMSChannel` is one-way coupled (forced) buy the rest of the model. So we could use the output of the first, NHM run above without any reservoir representation and use its outupts to drive just the `FlowGraph` in the run above. We might call running `FlowGraph` in this way a \"post-process\". If one were running the no-reservoir model and looking at hypotheses of what FlowGraphs give better flow representations, this is the method you'd want to follow.\n", + "Above we ran the equivalent of the full NHM but with a `StarfitNode` inserted at Big Sandy. Pywatershed is flexible and no two process representations are two-way coupled in the NHM configuration. This means that the `FlowGraph` in the model run above could be run as a post-process on the rest of the model chain.\n", + "\n", + "In fact, we can use the output of the first model run above, without any reservoir representation, to drive just the `FlowGraph` in the previous run. We call running `FlowGraph` in this way a \"post-process\". 
If one were running the no-reservoir model and investigating hypotheses of what FlowGraph designs give better flow representations, this is the method you'd want to follow instead of running all the model processes above `FlowGraph` every time.\n", "\n", - "So for this case we have a different helper function, `prms_channel_flow_graph_postprocess`, to which we supply most of the same information about the `FlowGraph`. However, we tell it about where it can find inputs from file rather than about an existing `model_dict` (as above).\n", + "For this post-process case we have a different helper function, `prms_channel_flow_graph_postprocess`, to which we supply most of the same information about the `FlowGraph`. However, we tell it about where it can find inputs from file rather than about an existing `model_dict` (as in the previous model above).\n", "\n", - "For additional extra fun and illustration, we'll not only add the `StarfitNode` for Big Sandy, we'll demonstrate that we can add additional nodes to the `FlowGraph` by putting a random `PassThroughFlowNode` elsewhere on the domain. This node has no effect on the flows by design, but adding it here shows how additional nodes can easily be added to a `FlowGraph`." + "For extra fun and illustration, we'll not only add the `StarfitNode` for Big Sandy, we'll demonstrate that we can add additional nodes to the `FlowGraph` by putting a random `PassThroughFlowNode` elsewhere on the domain. This node has no effect on the flows by design, but adding it here shows how additional nodes can easily be added to a `FlowGraph`." 
] }, { @@ -705,6 +646,7 @@ " 44426,\n", " 44435,\n", " ], # the second is a pass through above the first\n", + " addtl_output_vars=[\"spill\", \"release\"],\n", ")" ] }, @@ -736,10 +678,14 @@ "metadata": {}, "outputs": [], "source": [ - "wh_44426 = np.where(params.parameters[\"nhm_seg\"] == 44426)[0]\n", - "outflow_nodes_post = xr.open_dataarray(run_dir / \"node_outflows.nc\")[\n", - " :, wh_44426\n", - "].rename(\"node_outflows_post\")\n", + "# wh_44426 = np.where(params.parameters[\"nhm_seg\"] == 44426)[0]\n", + "outflow_nodes_post = xr.open_dataarray(run_dir / \"node_outflows.nc\")\n", + "wh_big_sandy = (outflow_nodes_post.node_maker_id == 999) & (\n", + " outflow_nodes_post.node_maker_name == \"starfit\"\n", + ")\n", + "outflow_nodes_post = outflow_nodes_post[:, wh_big_sandy].rename(\n", + " \"node_outflows_post\"\n", + ")\n", "outflow_nodes_post = outflow_nodes_post.drop_vars(\n", " set(outflow_nodes_post.coords) - {\"time\"}\n", ")" @@ -774,9 +720,11 @@ "metadata": {}, "outputs": [], "source": [ - "storage_nodes_post = xr.open_dataarray(run_dir / \"node_storages.nc\")[\n", - " :, -2\n", - "].rename( # pass through is the last node this time\n", + "storage_nodes_post = xr.open_dataarray(run_dir / \"node_storages.nc\")\n", + "wh_big_sandy = (storage_nodes_post.node_maker_id == 999) & (\n", + " storage_nodes_post.node_maker_name == \"starfit\"\n", + ")\n", + "storage_nodes_post = storage_nodes_post[:, wh_big_sandy].rename(\n", " \"node_storages_post\"\n", ")\n", "storage_nodes_post = storage_nodes_post.drop_vars(\n", @@ -823,6 +771,54 @@ " ylabel=\"streamflow (cfs)\\nstorage (million cubic feet)\",\n", ")" ] + }, + { + "cell_type": "markdown", + "id": "7c74f780-a421-441b-a7ee-f2ff24b759ee", + "metadata": {}, + "source": [ + "While the `FlowGraph` itself only looks at lateral inflows, upstream inflows, and total outflows at each node, there may be other variables of interest on a node for the user. 
Looking at the properties or attributes of an individual `FlowNode` reveals what other variables are available for each node of that type. Above, the argument `addtl_output_vars=[\"spill\", \"release\"]` was passed in the call to `pws.prms_channel_flow_graph_postprocess`. This requests that these variables are output to NetCDF files on nodes where they are available. Nodes where these variables are not available will contain missing (NaN) values. In the case of `StarfitFlowNode`s, the total outflow has two components, the spill and the release. From the node outflow variable, we can not see the individual contribution of these terms. So we request these variables are output and we see that in the second summer (June 13-16, 1980) there is indeed a spill event on Big Sandy which contributes to the total outflow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb9cf577-85e6-49ce-8a66-8862beb5aeca", + "metadata": {}, + "outputs": [], + "source": [ + "spill = xr.open_dataarray(run_dir / \"spill.nc\")[:, wh_big_sandy]\n", + "release = xr.open_dataarray(run_dir / \"release.nc\")[:, wh_big_sandy]\n", + "drop_vars = set(spill.coords) - {\"time\"}\n", + "spill = spill.drop_vars(drop_vars)\n", + "release = release.drop_vars(drop_vars)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f6f20212-0e10-4b75-9533-62002324d4e5", + "metadata": {}, + "outputs": [], + "source": [ + "xr.merge(\n", + " [\n", + " outflow_nodes_post,\n", + " spill,\n", + " release,\n", + " ]\n", + ").rename(\n", + " {\n", + " \"node_outflows_post\": f\"Big Sandy Outflow CAP*{cap_mult}\",\n", + " \"spill\": f\"Big Sandy Spill CAP*{cap_mult}\",\n", + " \"release\": f\"Big Sandy Release CAP*{cap_mult}\",\n", + " }\n", + ").hvplot(\n", + " width=plot_width,\n", + " height=plot_height,\n", + " ylabel=\"streamflow (cfs)\",\n", + ")" + ] } ], "metadata": { diff --git a/examples/model_loop_custom_output.ipynb b/examples/model_loop_custom_output.ipynb index 
3c88ed48..3a5c0717 100644 --- a/examples/model_loop_custom_output.ipynb +++ b/examples/model_loop_custom_output.ipynb @@ -283,9 +283,9 @@ " \n", " proc = model.processes[var_proc[var]]\n", " dim_name = needed_metadata[var][\"dims\"][0]\n", - " dim_len = proc.params.dims[dim_name]\n", + " dim_len = proc._params.dims[dim_name]\n", " coord_name = dim_coord[dim_name]\n", - " coord_data = proc.params.coords[dim_coord[dim_name]]\n", + " coord_data = proc._params.coords[dim_coord[dim_name]]\n", " type = needed_metadata[var][\"type\"]\n", " \n", " var_meta = {\n", diff --git a/pywatershed/analysis/process_plot.py b/pywatershed/analysis/process_plot.py index 2f85a4c3..99f1d8e3 100644 --- a/pywatershed/analysis/process_plot.py +++ b/pywatershed/analysis/process_plot.py @@ -41,7 +41,7 @@ def __init__( # if (self.__seg_poly.crs.name # == "USA_Contiguous_Albers_Equal_Area_Conic_USGS_version"): # print("Overriding USGS aea crs with EPSG:5070") - self.seg_gdf.crs = "EPSG:5070" + self.seg_gdf.set_crs("EPSG:5070") self.seg_geoms_exploded = ( self.seg_gdf.explode(index_parts=True) diff --git a/pywatershed/hydrology/starfit.py b/pywatershed/hydrology/starfit.py index 18523ffe..cc8a1a64 100644 --- a/pywatershed/hydrology/starfit.py +++ b/pywatershed/hydrology/starfit.py @@ -536,6 +536,12 @@ class StarfitFlowNode(FlowNode): computed in a :class:`FlowGraph`. The solution has the option for subtimestep or daily computations. + Daily computations have the same outflows on the substeps of a day and + outflows and storages are calculated on the last subtimestep. On the first + subtimestep, we use the inflow of the first subtimestep as representative + of the mean inflow of the previous day in order to calculate an average + outflow for the first timestep. + The STARFIT reference: Sean W.D. Turner, Jennie Clarice Steyaert, Laura Condon, Nathalie Voisin,