diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/.idea/HydroDLAdj.iml b/.idea/HydroDLAdj.iml
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ b/.idea/HydroDLAdj.iml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..9e0a012
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..dc00212
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..321f79a
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/HydroModels/HBV_Module.py b/HydroModels/HBV_Module.py
new file mode 100644
index 0000000..cf2f126
--- /dev/null
+++ b/HydroModels/HBV_Module.py
@@ -0,0 +1,131 @@
+import torch
+import torch.nn as nn
+
+class HBV_Module(nn.Module):
+    def __init__(self, climate_data, nfea):
+        super().__init__()
+        self.climate_data = climate_data
+        self.nfea = nfea
+
+    def forward(self, y, theta, t, returnFlux=False, aux=None):
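+        # Evaluates one step of the HBV model.
+        # y: (batch, 5) storages; theta: (batch, nfea) parameters scaled to [0, 1];
+        # t: time index into climate_data. Returns (dS, fluxes), or the individual
+        # flux components when returnFlux is True.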
+        # parameters
+        bs = theta.shape[0]
+
+        nfea = self.nfea
+        theta = theta.view(bs, nfea)
+        parascaLst = [[1, 6], [50, 1000], [0.05, 0.9], [0.01, 0.5], [0.001, 0.2], [0.2, 1],
+                      [0, 10], [0, 100], [-2.5, 2.5], [0.5, 10], [0, 0.1], [0, 0.2], [0.3, 5]]  # HBV parameter ranges [min, max]
+
+        Beta = parascaLst[0][0] + theta[:, 0] * (parascaLst[0][1] - parascaLst[0][0])
+        FC = parascaLst[1][0] + theta[:, 1] * (parascaLst[1][1] - parascaLst[1][0])
+        K0 = parascaLst[2][0] + theta[:, 2] * (parascaLst[2][1] - parascaLst[2][0])
+        K1 = parascaLst[3][0] + theta[:, 3] * (parascaLst[3][1] - parascaLst[3][0])
+        K2 = parascaLst[4][0] + theta[:, 4] * (parascaLst[4][1] - parascaLst[4][0])
+        LP = parascaLst[5][0] + theta[:, 5] * (parascaLst[5][1] - parascaLst[5][0])
+        PERC = parascaLst[6][0] + theta[:, 6] * (parascaLst[6][1] - parascaLst[6][0])
+        UZL = parascaLst[7][0] + theta[:, 7] * (parascaLst[7][1] - parascaLst[7][0])
+        TT = parascaLst[8][0] + theta[:, 8] * (parascaLst[8][1] - parascaLst[8][0])
+        CFMAX = parascaLst[9][0] + theta[:, 9] * (parascaLst[9][1] - parascaLst[9][0])
+        CFR = parascaLst[10][0] + theta[:, 10] * (parascaLst[10][1] - parascaLst[10][0])
+        CWH = parascaLst[11][0] + theta[:, 11] * (parascaLst[11][1] - parascaLst[11][0])
+        BETAET = parascaLst[12][0] + theta[:, 12] * (parascaLst[12][1] - parascaLst[12][0])
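+        # Standard HBV parameter meanings: Beta (soil routine shape), FC (field capacity),
+        # K0/K1/K2 (recession coefficients), LP (soil-moisture fraction above which ET runs
+        # at the potential rate), PERC (max percolation), UZL (upper-zone threshold for
+        # surface flow), TT (threshold temperature), CFMAX (degree-day melt factor),
+        # CFR (refreezing coefficient), CWH (snowpack water-holding capacity),
+        # BETAET (ET shape exponent, an extension of standard HBV)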
+
+        PRECS = 0
+        # stores (state variables)
+        SNOWPACK = torch.clamp(y[:, 0], min=PRECS)  # snowpack
+        MELTWATER = torch.clamp(y[:, 1], min=PRECS)  # liquid water held in the snowpack
+        SM = torch.clamp(y[:, 2], min=1e-8)  # soil moisture
+        SUZ = torch.clamp(y[:, 3], min=PRECS)  # upper-zone storage
+        SLZ = torch.clamp(y[:, 4], min=PRECS)  # lower-zone storage
+        dS = torch.zeros(y.shape[0], y.shape[1]).to(y)
+        fluxes = torch.zeros((y.shape[0], 1)).to(y)
+
+        climate_in = self.climate_data[int(t), :, :]  # climate forcing at this step
+        P = climate_in[:, 0]
+        Ep = climate_in[:, 2]
+        T = climate_in[:, 1]
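+        # climate_data columns follow the forcing list used in the examples: [prcp, tmean, PET]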
+
+        # flux functions
+        flux_sf = self.snowfall(P, T, TT)
+        flux_refr = self.refreeze(CFR, CFMAX, T, TT, MELTWATER)
+        flux_melt = self.melt(CFMAX, T, TT, SNOWPACK)
+        flux_rf = self.rainfall(P, T, TT)
+        flux_Isnow = self.Isnow(MELTWATER, CWH, SNOWPACK)
+        flux_PEFF = self.Peff(SM, FC, Beta, flux_rf, flux_Isnow)
+        flux_ex = self.excess(SM, FC)
+        flux_et = self.evap(SM, FC, LP, Ep, BETAET)
+        flux_perc = self.percolation(PERC, SUZ)
+        flux_q0 = self.interflow(K0, SUZ, UZL)
+        flux_q1 = self.baseflow(K1, SUZ)
+        flux_q2 = self.baseflow(K2, SLZ)
+
+
+        # storage ODEs
+        dS[:, 0] = flux_sf + flux_refr - flux_melt
+        dS[:, 1] = flux_melt - flux_refr - flux_Isnow
+        dS[:, 2] = flux_Isnow + flux_rf - flux_PEFF - flux_ex - flux_et
+        dS[:, 3] = flux_PEFF + flux_ex - flux_perc - flux_q0 - flux_q1
+        dS[:, 4] = flux_perc - flux_q2
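+        # store order: 0 snowpack, 1 meltwater, 2 soil moisture, 3 upper zone, 4 lower zone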
+
+        fluxes[:, 0] = flux_q0 + flux_q1 + flux_q2
+
+        if returnFlux:
+            return fluxes, flux_q0.unsqueeze(-1), flux_q1.unsqueeze(-1), flux_q2.unsqueeze(-1), flux_et.unsqueeze(-1)
+        else:
+            return dS, fluxes
+
+    def recharge(self, C, NDC, FC, SM, SLZ):
+        # not used by this base variant; HBV_Module_add_cr wires it into the ODEs
+        return C * SLZ * (1.0 - torch.clamp(SM / (NDC * FC), max=1.0))
+
+    def snowfall(self, P, T, TT):
+        # precipitation falls as snow below the threshold temperature TT
+        return torch.mul(P, (T < TT))
+
+    def refreeze(self, CFR, CFMAX, T, TT, MELTWATER):
+        # refreezing of liquid water held in the snowpack, capped by available meltwater
+        refreezing = CFR * CFMAX * (TT - T)
+        refreezing = torch.clamp(refreezing, min=0.0)
+        return torch.min(refreezing, MELTWATER)
+
+    def melt(self, CFMAX, T, TT, SNOWPACK):
+        # degree-day snowmelt, capped by the available snowpack
+        melt = CFMAX * (T - TT)
+        melt = torch.clamp(melt, min=0.0)
+        return torch.min(melt, SNOWPACK)
+
+    def rainfall(self, P, T, TT):
+        # precipitation falls as rain at or above the threshold temperature TT
+        return torch.mul(P, (T >= TT))
+
+    def Isnow(self, MELTWATER, CWH, SNOWPACK):
+        # meltwater beyond the snowpack's holding capacity infiltrates the soil
+        tosoil = MELTWATER - (CWH * SNOWPACK)
+        tosoil = torch.clamp(tosoil, min=0.0)
+        return tosoil
+
+    def Peff(self, SM, FC, Beta, flux_rf, flux_Isnow):
+        # effective precipitation routed to the upper zone, scaled by soil wetness
+        soil_wetness = (SM / FC) ** Beta
+        soil_wetness = torch.clamp(soil_wetness, min=0.0, max=1.0)
+        return (flux_rf + flux_Isnow) * soil_wetness
+
+    def excess(self, SM, FC):
+        # soil moisture above field capacity drains to the upper zone
+        excess = SM - FC
+        return torch.clamp(excess, min=0.0)
+
+    def evap(self, SM, FC, LP, Ep, BETAET):
+        # actual ET, limited by soil moisture and potential ET
+        evapfactor = (SM / (LP * FC)) ** BETAET
+        evapfactor = torch.clamp(evapfactor, min=0.0, max=1.0)
+        ETact = Ep * evapfactor
+        return torch.min(SM, ETact)
+
+    def interflow(self, K0, SUZ, UZL):
+        # fast runoff from upper-zone storage above the threshold UZL
+        return K0 * torch.clamp(SUZ - UZL, min=0.0)
+
+    def percolation(self, PERC, SUZ):
+        # percolation from the upper zone to the lower zone
+        return torch.min(SUZ, PERC)
+
+    def baseflow(self, K, S):
+        # linear-reservoir outflow
+        return K * S
diff --git a/HydroModels/HBV_Module_add_cr.py b/HydroModels/HBV_Module_add_cr.py
new file mode 100644
index 0000000..39a09a3
--- /dev/null
+++ b/HydroModels/HBV_Module_add_cr.py
@@ -0,0 +1,139 @@
+import torch
+import torch.nn as nn
+
+class HBV_Module_add_cr(nn.Module):
+    def __init__(self, climate_data, nfea):
+        super().__init__()
+        self.climate_data = climate_data
+        self.nfea = nfea
+
+    def forward(self, y, theta, t, returnFlux=False, aux=None):
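+        # One step of the HBV variant with an added recharge flux (likely capillary
+        # rise, the "cr" in the class name) from the lower zone back to soil moisture.
+        # Inputs and outputs otherwise match HBV_Module.forward.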
+        # parameters
+        bs = theta.shape[0]
+
+        nfea = self.nfea
+        theta = theta.view(bs, nfea)
+        parascaLst = [[1, 6], [50, 1000], [0.05, 0.9], [0.01, 0.5], [0.001, 0.2], [0.2, 1],
+                      [0, 10], [0, 100], [-2.5, 2.5], [0.5, 10], [0, 0.1], [0, 0.2], [0.3, 5],
+                      [0, 1], [0.05, 0.95]]  # HBV parameter ranges [min, max]
+
+        Beta = parascaLst[0][0] + theta[:, 0] * (parascaLst[0][1] - parascaLst[0][0])
+        FC = parascaLst[1][0] + theta[:, 1] * (parascaLst[1][1] - parascaLst[1][0])
+        K0 = parascaLst[2][0] + theta[:, 2] * (parascaLst[2][1] - parascaLst[2][0])
+        K1 = parascaLst[3][0] + theta[:, 3] * (parascaLst[3][1] - parascaLst[3][0])
+        K2 = parascaLst[4][0] + theta[:, 4] * (parascaLst[4][1] - parascaLst[4][0])
+        LP = parascaLst[5][0] + theta[:, 5] * (parascaLst[5][1] - parascaLst[5][0])
+        PERC = parascaLst[6][0] + theta[:, 6] * (parascaLst[6][1] - parascaLst[6][0])
+        UZL = parascaLst[7][0] + theta[:, 7] * (parascaLst[7][1] - parascaLst[7][0])
+        TT = parascaLst[8][0] + theta[:, 8] * (parascaLst[8][1] - parascaLst[8][0])
+        CFMAX = parascaLst[9][0] + theta[:, 9] * (parascaLst[9][1] - parascaLst[9][0])
+        CFR = parascaLst[10][0] + theta[:, 10] * (parascaLst[10][1] - parascaLst[10][0])
+        CWH = parascaLst[11][0] + theta[:, 11] * (parascaLst[11][1] - parascaLst[11][0])
+        BETAET = parascaLst[12][0] + theta[:, 12] * (parascaLst[12][1] - parascaLst[12][0])
+        C = parascaLst[13][0] + theta[:, 13] * (parascaLst[13][1] - parascaLst[13][0])
+        NDC = parascaLst[14][0] + theta[:, 14] * (parascaLst[14][1] - parascaLst[14][0])
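+        # C: recharge (capillary-rise) coefficient; NDC: fraction of field capacity
+        # below which recharge from the lower zone stays active (see recharge() below)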
+
+        PRECS = 0
+        # stores (state variables)
+        SNOWPACK = torch.clamp(y[:, 0], min=PRECS)  # snowpack
+        MELTWATER = torch.clamp(y[:, 1], min=PRECS)  # liquid water held in the snowpack
+        SM = torch.clamp(y[:, 2], min=1e-8)  # soil moisture
+        SUZ = torch.clamp(y[:, 3], min=PRECS)  # upper-zone storage
+        SLZ = torch.clamp(y[:, 4], min=PRECS)  # lower-zone storage
+        dS = torch.zeros(y.shape[0], y.shape[1]).to(y)
+        fluxes = torch.zeros((y.shape[0], 1)).to(y)
+
+        climate_in = self.climate_data[int(t), :, :]  # climate forcing at this step
+        P = climate_in[:, 0]
+        Ep = climate_in[:, 2]
+        T = climate_in[:, 1]
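+        # climate_data columns follow the forcing list used in the examples: [prcp, tmean, PET]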
+
+        # flux functions
+        flux_sf = self.snowfall(P, T, TT)
+        flux_refr = self.refreeze(CFR, CFMAX, T, TT, MELTWATER)
+        flux_melt = self.melt(CFMAX, T, TT, SNOWPACK)
+        flux_rf = self.rainfall(P, T, TT)
+        flux_Isnow = self.Isnow(MELTWATER, CWH, SNOWPACK)
+        flux_PEFF = self.Peff(SM, FC, Beta, flux_rf, flux_Isnow)
+        flux_ex = self.excess(SM, FC)
+        flux_et = self.evap(SM, FC, LP, Ep, BETAET)
+        flux_perc = self.percolation(PERC, SUZ)
+        flux_q0 = self.interflow(K0, SUZ, UZL)
+        flux_q1 = self.baseflow(K1, SUZ)
+        flux_q2 = self.baseflow(K2, SLZ)
+        flux_recharge = self.recharge(C, NDC, FC, SM, SLZ)
+
+
+        # storage ODEs
+        dS[:, 0] = flux_sf + flux_refr - flux_melt
+        dS[:, 1] = flux_melt - flux_refr - flux_Isnow
+        dS[:, 2] = flux_Isnow + flux_rf - flux_PEFF - flux_ex - flux_et + flux_recharge
+        dS[:, 3] = flux_PEFF + flux_ex - flux_perc - flux_q0 - flux_q1
+        dS[:, 4] = flux_perc - flux_q2 - flux_recharge
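+        # recharge moves water from the lower zone (dS[:, 4]) back into soil
+        # moisture (dS[:, 2]), so total mass across the stores is conserved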
+
+        fluxes[:, 0] = flux_q0 + flux_q1 + flux_q2
+
+        if returnFlux:
+            return fluxes, flux_q0.unsqueeze(-1), flux_q1.unsqueeze(-1), flux_q2.unsqueeze(-1), flux_et.unsqueeze(-1)
+        else:
+            return dS, fluxes
+
+    def recharge(self, C, NDC, FC, SM, SLZ):
+        # capillary rise from the lower zone; shuts off as SM approaches NDC * FC
+        return C * SLZ * (1.0 - torch.clamp(SM / (NDC * FC), max=1.0))
+
+    def snowfall(self, P, T, TT):
+        # precipitation falls as snow below the threshold temperature TT
+        return torch.mul(P, (T < TT))
+
+    def refreeze(self, CFR, CFMAX, T, TT, MELTWATER):
+        # refreezing of liquid water held in the snowpack, capped by available meltwater
+        refreezing = CFR * CFMAX * (TT - T)
+        refreezing = torch.clamp(refreezing, min=0.0)
+        return torch.min(refreezing, MELTWATER)
+
+    def melt(self, CFMAX, T, TT, SNOWPACK):
+        # degree-day snowmelt, capped by the available snowpack
+        melt = CFMAX * (T - TT)
+        melt = torch.clamp(melt, min=0.0)
+        return torch.min(melt, SNOWPACK)
+
+    def rainfall(self, P, T, TT):
+        # precipitation falls as rain at or above the threshold temperature TT
+        return torch.mul(P, (T >= TT))
+
+    def Isnow(self, MELTWATER, CWH, SNOWPACK):
+        # meltwater beyond the snowpack's holding capacity infiltrates the soil
+        tosoil = MELTWATER - (CWH * SNOWPACK)
+        tosoil = torch.clamp(tosoil, min=0.0)
+        return tosoil
+
+    def Peff(self, SM, FC, Beta, flux_rf, flux_Isnow):
+        # effective precipitation routed to the upper zone, scaled by soil wetness
+        soil_wetness = (SM / FC) ** Beta
+        soil_wetness = torch.clamp(soil_wetness, min=0.0, max=1.0)
+        return (flux_rf + flux_Isnow) * soil_wetness
+
+    def excess(self, SM, FC):
+        # soil moisture above field capacity drains to the upper zone
+        excess = SM - FC
+        return torch.clamp(excess, min=0.0)
+
+    def evap(self, SM, FC, LP, Ep, BETAET):
+        # actual ET, limited by soil moisture and potential ET
+        evapfactor = (SM / (LP * FC)) ** BETAET
+        evapfactor = torch.clamp(evapfactor, min=0.0, max=1.0)
+        ETact = Ep * evapfactor
+        return torch.min(SM, ETact)
+
+    def interflow(self, K0, SUZ, UZL):
+        # fast runoff from upper-zone storage above the threshold UZL
+        return K0 * torch.clamp(SUZ - UZL, min=0.0)
+
+    def percolation(self, PERC, SUZ):
+        # percolation from the upper zone to the lower zone
+        return torch.min(SUZ, PERC)
+
+    def baseflow(self, K, S):
+        # linear-reservoir outflow
+        return K * S
diff --git a/LICENSE b/LICENSE
index f288702..5680100 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,674 +1,31 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/philosophy/why-not-lgpl.html>.
+Non-Commercial Software License Agreement
+
+By downloading the hydroDL software (the “Software”) you agree to
+the following terms of use:
+Copyright (c) 2020, The Pennsylvania State University (“PSU”). All rights reserved.
+
+1. PSU hereby grants to you a perpetual, nonexclusive and worldwide right, privilege and
+license to use, reproduce, modify, display, and create derivative works of Software for all
+non-commercial purposes only. You may not use Software for commercial purposes without
+prior written consent from PSU. Queries regarding commercial licensing should be directed
+to The Office of Technology Management at 814.865.6277 or otminfo@psu.edu.
+2. Neither the name of the copyright holder nor the names of its contributors may be used
+to endorse or promote products derived from this software without specific prior written
+permission.
+3. This software is provided for non-commercial use only.
+4. Redistribution and use in source and binary forms, with or without modification, are
+permitted provided that redistributions must reproduce the above copyright notice, license,
+list of conditions and the following disclaimer in the documentation and/or other materials
+provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/data/func.py b/data/func.py
new file mode 100644
index 0000000..b2e4650
--- /dev/null
+++ b/data/func.py
@@ -0,0 +1,131 @@
+import pickle
+import numpy as np
+
+def basinNorm_mmday(x, basinarea, toNorm):
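+    # Converts streamflow between ft^3/s and mm/day using basin area (assumed km^2,
+    # e.g. the CAMELS 'area_gages2' attribute). toNorm=True: ft^3/s -> mm/day.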
+    nd = len(x.shape)
+    if nd == 3 and x.shape[2] == 1:
+        x = x[:, :, 0]  # squeeze the singleton variable dimension
+    temparea = np.tile(basinarea, (1, x.shape[1]))
+    if toNorm is True:
+        flow = (x * 0.0283168 * 3600 * 24) / (
+            (temparea * (10 ** 6)) * (10 ** (-3))
+        )  # ft^3/s -> mm/day
+    else:
+        flow = (
+            x
+            * ((temparea * (10 ** 6)) * (10 ** (-3)))
+            / (0.0283168 * 3600 * 24)
+        )  # mm/day -> ft^3/s
+    if nd == 3:
+        flow = np.expand_dims(flow, axis=2)
+    return flow
+
+# This part defines a hydrology-specific scaler class that works similarly to sklearn scalers
+# (it standardizes by mean and std); getStatDic, calcStat, and calcStatgamma are supporting functions
+def getStatDic(log_norm_cols, attrLst=None, attrdata=None, seriesLst=None, seriesdata=None):
+    statDict = dict()
+    # series data
+    if seriesLst is not None:
+        for k in range(len(seriesLst)):
+            var = seriesLst[k]
+            if var in log_norm_cols:
+                statDict[var] = calcStatgamma(seriesdata[:, :, k])
+            else:
+                statDict[var] = calcStat(seriesdata[:, :, k])
+
+    # constant attributes
+    if attrLst is not None:
+        for k in range(len(attrLst)):
+            var = attrLst[k]
+            statDict[var] = calcStat(attrdata[:, k])
+    return statDict
+
+def calcStat(x):
+    a = x.flatten()
+    b = a[~np.isnan(a)]  # drop NaNs
+    p10 = np.percentile(b, 10).astype(float)
+    p90 = np.percentile(b, 90).astype(float)
+    mean = np.mean(b).astype(float)
+    std = np.std(b).astype(float)
+    if std < 0.001:
+        std = 1
+    return [p10, p90, mean, std]
+
+def calcStatgamma(x):  # for daily streamflow and precipitation
+    a = x.flatten()
+    b = a[~np.isnan(a)]  # drop NaNs
+    b = np.log10(
+        np.sqrt(b) + 0.1
+    )  # transform to reduce the skew of gamma-like distributions
+    p10 = np.percentile(b, 10).astype(float)
+    p90 = np.percentile(b, 90).astype(float)
+    mean = np.mean(b).astype(float)
+    std = np.std(b).astype(float)
+    if std < 0.001:
+        std = 1
+    return [p10, p90, mean, std]
+
+def transNormbyDic(x_in, var_lst, stat_dict, log_norm_cols, to_norm):
+    if type(var_lst) is str:
+        var_lst = [var_lst]
+    x = x_in.copy()
+    out = np.full(x.shape, np.nan)
+    for k in range(len(var_lst)):
+        var = var_lst[k]
+        stat = stat_dict[var]
+        if to_norm is True:
+            if len(x.shape) == 3:
+                if var in log_norm_cols:
+                    x[:, :, k] = np.log10(np.sqrt(x[:, :, k]) + 0.1)
+                out[:, :, k] = (x[:, :, k] - stat[2]) / stat[3]
+            elif len(x.shape) == 2:
+                if var in log_norm_cols:
+                    x[:, k] = np.log10(np.sqrt(x[:, k]) + 0.1)
+                out[:, k] = (x[:, k] - stat[2]) / stat[3]
+        else:
+            if len(x.shape) == 3:
+                out[:, :, k] = x[:, :, k] * stat[3] + stat[2]
+                if var in log_norm_cols:
+                    out[:, :, k] = (np.power(10, out[:, :, k]) - 0.1) ** 2
+            elif len(x.shape) == 2:
+                out[:, k] = x[:, k] * stat[3] + stat[2]
+                if var in log_norm_cols:
+                    out[:, k] = (np.power(10, out[:, k]) - 0.1) ** 2
+    return out
+
+
+
+class HydroScaler:
+    def __init__(self, attrLst, seriesLst, xNanFill, log_norm_cols):
+        self.log_norm_cols = log_norm_cols
+        self.attrLst = attrLst
+        self.seriesLst = seriesLst
+        self.stat_dict = None
+        self.xNanFill = xNanFill
+
+    def fit(self, attrdata, seriesdata):
+        self.stat_dict = getStatDic(
+            log_norm_cols=self.log_norm_cols,
+            attrLst=self.attrLst,
+            attrdata=attrdata,
+            seriesLst=self.seriesLst,
+            seriesdata=seriesdata,
+        )
+
+    def transform(self, data, var_list):
+        norm_data = transNormbyDic(
+            data, var_list, self.stat_dict, log_norm_cols=self.log_norm_cols, to_norm=True
+        )
+        return norm_data
+
+    def fit_transform(self, attrdata, seriesdata):
+        self.fit(attrdata, seriesdata)
+        attr_norm = self.transform(attrdata, self.attrLst)
+        series_norm = self.transform(seriesdata, self.seriesLst)
+        return attr_norm, series_norm
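+
+# Typical usage (mirroring examples/main_HBV_adjoint.py):
+#   scaler = HydroScaler(attrLst=attrLst, seriesLst=forcing_lst, xNanFill=0.0, log_norm_cols=["prcp"])
+#   attri_norm, xTrain_norm = scaler.fit_transform(train_c, xTrain.copy())
+#   xTrain_norm_val = scaler.transform(xTrain_val, forcing_lst)  # reuses training statistics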
+
+
+
diff --git a/examples/main_HBV_adjoint.py b/examples/main_HBV_adjoint.py
new file mode 100644
index 0000000..5630835
--- /dev/null
+++ b/examples/main_HBV_adjoint.py
@@ -0,0 +1,211 @@
+# This code was written by Yalan Song from the MHPI group, Penn State University
+# Purpose: solve the ODEs of hydrological models with the adjoint method
+import torch
+import numpy as np
+import os
+import sys
+
+sys.path.append('../..')
+from HydroDLAdj.nnModel import train_module
+from HydroDLAdj.nnModel import test_module
+from HydroDLAdj.nnModel import lstmModel_module
+from HydroDLAdj.data import func
+
+import random
+import glob
+import re
+import pickle
+import pandas as pd
+## Set the random seeds for reproducibility
+randomseed = 111111
+random.seed(randomseed)
+torch.manual_seed(randomseed)
+np.random.seed(randomseed)
+torch.cuda.manual_seed(randomseed)
+torch.backends.cudnn.deterministic = True
+torch.backends.cudnn.benchmark = False
+
+## Set the GPU machine to use
+gpuid = 0
+torch.cuda.set_device(gpuid)
+device = torch.device("cuda")
+dtype=torch.float32
+
+## To create the pickle file for CAMELS data, you can use the code at the following link:
+# https://colab.research.google.com/drive/1oq5onUpekCjuInTlnEdT3TGR39SSjr8F?usp=sharing#scrollTo=FGViqzC__BCw
+## Or use the following command lines to directly get training_file and validation_file; in that case, the input variables and training/test periods are fixed:
+#Ttrain = [19801001, 19951001] # training period
+#valid_date = [19951001, 20101001] # testing period
+#!pip install gdown
+#!gdown 1HrO-8A0l7qgVVz6pIRFfqZin2ZxAG72B
+#!gdown 1ZPI_ypIpF9o-YzZnC9mc-Ti2t-c6g7F5
+#!gdown 1VhjjKE7KYcGIeWlihOP9fQtZ71E7ufjl
+## Load the data
+datapath = "/data/yxs275/hydroDL/example/notebook/datatest/"
+train_file = datapath+'training_file'
+
+## Path to save your model
+saveFolder = "/data/yxs275/NROdeSolver/output/HBVtest_module_hbv_1_2_13_dynamic_rout_static_no_threshold/"
+# Load X, Y, C from a file
+with open(train_file, 'rb') as f:
+ train_x, train_y, train_c = pickle.load(f) # Adjust this line based on your file format
+## Forcings from the pickle file are precipitation and temperature from Daymet;
+## PET was prepared by the MHPI group for [19801001, 20101001]
+data_PET_all = np.load(datapath + "PET.npy")
+time_PET = pd.date_range('1980-10-01', '2010-09-30', freq='d')
+data_PET = data_PET_all[:, time_PET.get_loc('1980-10-01'):time_PET.get_loc('1995-10-01')]
+
+##List of attributes
+attrLst = [ 'p_mean','pet_mean', 'p_seasonality', 'frac_snow', 'aridity', 'high_prec_freq', 'high_prec_dur',
+ 'low_prec_freq', 'low_prec_dur', 'elev_mean', 'slope_mean', 'area_gages2', 'frac_forest', 'lai_max',
+ 'lai_diff', 'gvf_max', 'gvf_diff', 'dom_land_cover_frac', 'dom_land_cover', 'root_depth_50',
+ 'soil_depth_pelletier', 'soil_depth_statsgo', 'soil_porosity', 'soil_conductivity',
+ 'max_water_content', 'sand_frac', 'silt_frac', 'clay_frac', 'geol_1st_class', 'glim_1st_class_frac',
+ 'geol_2nd_class', 'glim_2nd_class_frac', 'carbonate_rocks_frac', 'geol_porostiy', 'geol_permeability']
+
+
+
+basinarea = train_c[:, np.where(np.array(attrLst) == 'area_gages2')[0]]
+
+streamflow_data = func.basinNorm_mmday(train_y, basinarea, toNorm=True)
+## List of forcings used in the hydrological model
+forcing_lst = ['prcp', 'tmean', 'PET']
+
+xTrain = np.concatenate((train_x, np.expand_dims(data_PET, axis=-1)), axis=-1)
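+# xTrain stacks the three forcings as input channels in the order [prcp, tmean, PET]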
+
+
+# Data normalization for the inputs of the NN
+log_norm_cols = ["prcp"]
+
+scaler = func.HydroScaler(attrLst=attrLst, seriesLst=forcing_lst, xNanFill=0.0, log_norm_cols=log_norm_cols)
+
+attri_norm, xTrain_norm = scaler.fit_transform(train_c, xTrain.copy())
+attri_norm[np.isnan(attri_norm)] = 0.0
+xTrain_norm[np.isnan(xTrain_norm)] = 0.0
+
+attri_norm = np.expand_dims(attri_norm, axis=1)
+attri_norm = np.repeat(attri_norm, xTrain_norm.shape[1], axis=1)
+data_norm = np.concatenate((xTrain_norm, attri_norm), axis=-1)
+
+
+# Hyperparameters
+bs = 100  ## batch size
+nS = 5  ## number of state variables
+nEpoch = 50  ## number of epochs
+alpha = 0.25  ## alpha in the loss function
+rho = 365  ## time length of a batch (days)
+buffTime = 365  ## length of the warm-up period
+delta_t = torch.tensor(1.0).to(device=device, dtype=dtype)  ## time step (one day)
+
+nflux = 1  ## number of target fluxes; only one: streamflow
+
+
+ninv = data_norm.shape[-1]
+nfea = 13  ## number of physical parameters in the hydrological model
+routn = 15  ## length of the routing window
+nmul = 16  ## number of model components
+hiddeninv = 256  ## hidden size of the NN
+drinv = 0.5  ## dropout rate
+model = lstmModel_module.lstmModel(ninv, nfea, nmul, hiddeninv, drinv)  ## initialize the NN (LSTM)
+tdlst = [1, 2, 13]  ## indices of the dynamic parameters
+tdRepS = [str(ix) for ix in tdlst]
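+# The three listed parameters are treated as time-varying (dynamic), matching the
+# save-folder tag "hbv_1_2_13_dynamic"; all other parameters stay static.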
+
+startEpoch = 0  ## start from epoch 0
+rerun = False  ## switch to resume training from the latest saved checkpoint
+
+if rerun:
+
+    weights_filenames = []
+    for f in glob.glob(saveFolder + "model_Ep*"):
+        # keep only the file name, so the epoch number is the first digit group
+        weights_filenames.append(os.path.basename(f))
+        print(re.split(r"(\d+)", os.path.basename(f)))
+    weights_filenames.sort(key=lambda x: int(re.split(r"(\d+)", x)[1]))
+
+
+    path_model = saveFolder + weights_filenames[-1]
+    print("Reading ", path_model)
+    model = torch.load(path_model, map_location=f"cuda:{gpuid}")
+    startEpoch = int(re.split(r"(\d+)", weights_filenames[-1])[1]) + 1
+
+if not os.path.exists(saveFolder):
+    os.mkdir(saveFolder)
+
+model_name = "HBV_Module"
+
+
+train_module.trainModel(xTrain,
+ streamflow_data,
+ data_norm,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ alpha,
+ tdlst,
+ startEpoch=startEpoch,
+ nEpoch=nEpoch,
+ miniBatch=[bs, rho],
+ buffTime=buffTime,
+ saveFolder=saveFolder,
+ routn=routn,
+ model_name=model_name
+ )
+
+
+## Model validation
+## Load the trained model
+testepoch = 50
+testbs = 200 #batchsize for testing
+model_file = saveFolder + f'/model_Ep{testepoch}.pt'
+print("Reading ", model_file)
+model = torch.load(model_file, map_location=f"cuda:{gpuid}")
+
+
+validation_file = datapath+'validation_file'
+with open(validation_file, 'rb') as g:
+ val_x, val_y, val_c = pickle.load(g)
+
+basinarea_val = val_c[:,np.where(np.array(attrLst)=='area_gages2')[0]]
+
+streamflow_data_val = func.basinNorm_mmday(val_y, basinarea_val, toNorm=True)
+
+
+data_PET_val = data_PET_all[:,time_PET.get_loc('1995-10-01'):time_PET.get_loc('2010-09-30')+1]
+xTrain_val = np.concatenate((val_x,np.expand_dims(data_PET_val,axis=-1)),axis = -1)
+
+attri_norm_val = scaler.transform(val_c, attrLst)
+xTrain_norm_val = scaler.transform(xTrain_val, forcing_lst)
+attri_norm_val[np.isnan(attri_norm_val)] = 0.0
+xTrain_norm_val[np.isnan(xTrain_norm_val)] = 0.0
+
+attri_norm_val= np.expand_dims(attri_norm_val, axis=1)
+attri_norm_val = np.repeat(attri_norm_val, xTrain_norm_val.shape[1], axis=1)
+data_norm_val = np.concatenate((xTrain_norm_val, attri_norm_val), axis=-1)
+
+warmuplength = 730 ## Use 2 years of training data to warm up the simulation in validation
+xTrain_val = np.concatenate((xTrain[:,-warmuplength:,:], xTrain_val), axis= 1)
+data_norm_val = np.concatenate((data_norm[:,-warmuplength:,:], data_norm_val), axis= 1)
+
+## Reference benchmark arrays (loaded here but not used further in this script)
+y = np.load("/data/yxs275/NROdeSolver/CAMELSData/obs_test.npy")
+x = np.load("/data/yxs275/NROdeSolver/CAMELSData/HBV_test.npy")[:,-y.shape[1]:,:]
+z = np.load("/data/yxs275/NROdeSolver/CAMELSData/LSTM_test.npy")[:,-y.shape[1]:,:]
+
+
+test_module.testModel(xTrain_val,
+ streamflow_data_val,
+ data_norm_val,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ tdlst,
+ bs = testbs,
+ saveFolder=saveFolder,
+ routn=routn,
+ model_name=model_name
+ )
diff --git a/examples/main_HBV_adjoint_stucture_change.py b/examples/main_HBV_adjoint_stucture_change.py
new file mode 100644
index 0000000..e1049d1
--- /dev/null
+++ b/examples/main_HBV_adjoint_stucture_change.py
@@ -0,0 +1,211 @@
+# This code is written by Yalan Song from the MHPI group, Penn State University
+# Purpose: This code solves the ODEs of hydrological models with the adjoint method
+import torch
+import numpy as np
+import os
+import sys
+
+sys.path.append('../..')
+from HydroDLAdj.nnModel import train_module
+from HydroDLAdj.nnModel import test_module
+from HydroDLAdj.nnModel import lstmModel_module
+from HydroDLAdj.data import func
+
+import random
+import glob
+import re
+import pickle
+import pandas as pd
+## Set the random seeds for reproducibility
+randomseed = 111111
+random.seed(randomseed)
+torch.manual_seed(randomseed)
+np.random.seed(randomseed)
+torch.cuda.manual_seed(randomseed)
+torch.backends.cudnn.deterministic = True
+torch.backends.cudnn.benchmark = False
+
+## Set the GPU machine to use
+gpuid = 0
+torch.cuda.set_device(gpuid)
+device = torch.device("cuda")
+dtype=torch.float32
+
+## To create the pickle file for CAMELS data, you can use the code the following link:
+# https://colab.research.google.com/drive/1oq5onUpekCjuInTlnEdT3TGR39SSjr8F?usp=sharing#scrollTo=FGViqzC__BCw
+## Or use the following command lines to directly get training_file and validation_file; note that this fixes the input variables and the training/test periods
+#Ttrain = [19801001, 19951001] #training period
+#valid_date = [19951001, 20101001] # Testing period
+#!pip install gdown
+#!gdown 1HrO-8A0l7qgVVz6pIRFfqZin2ZxAG72B
+#!gdown 1ZPI_ypIpF9o-YzZnC9mc-Ti2t-c6g7F5
+## Load the data
+datapath = "/data/yxs275/hydroDL/example/notebook/datatest/"
+train_file = datapath+'training_file'
+
+## Path to save your model
+saveFolder = "/data/yxs275/NROdeSolver/output/HBVtest_module_hbv_1_2_13_dynamic_rout_static_no_threshold/"
+# Load X, Y, C from a file
+with open(train_file, 'rb') as f:
+ train_x, train_y, train_c = pickle.load(f) # Adjust this line based on your file format
+## Forcings from the pickle file are precipitation and temperature from Daymet
+## PET prepared by the MHPI group for the period [19801001, 20101001]
+data_PET_all = np.load(datapath+"val_PET.npy" )
+time_PET = pd.date_range('1980-10-01', '2010-09-30', freq='d')
+data_PET = data_PET_all[:,time_PET.get_loc('1980-10-01'):time_PET.get_loc('1995-10-01')]
+
+##List of attributes
+attrLst = [ 'p_mean','pet_mean', 'p_seasonality', 'frac_snow', 'aridity', 'high_prec_freq', 'high_prec_dur',
+ 'low_prec_freq', 'low_prec_dur', 'elev_mean', 'slope_mean', 'area_gages2', 'frac_forest', 'lai_max',
+ 'lai_diff', 'gvf_max', 'gvf_diff', 'dom_land_cover_frac', 'dom_land_cover', 'root_depth_50',
+ 'soil_depth_pelletier', 'soil_depth_statsgo', 'soil_porosity', 'soil_conductivity',
+ 'max_water_content', 'sand_frac', 'silt_frac', 'clay_frac', 'geol_1st_class', 'glim_1st_class_frac',
+ 'geol_2nd_class', 'glim_2nd_class_frac', 'carbonate_rocks_frac', 'geol_porostiy', 'geol_permeability']
+
+
+
+basinarea = train_c[:,np.where(np.array(attrLst)=='area_gages2')[0]]
+
+streamflow_data = func.basinNorm_mmday(train_y, basinarea, toNorm=True)
+##List of forcing used in the hydrological model
+forcing_lst = ['prcp','tmean','PET']
+
+
+xTrain = np.concatenate((train_x,np.expand_dims(data_PET,axis=-1)),axis = -1)
+
+
+#Data normalization for inputs of NN
+log_norm_cols = [ "prcp" ]
+
+scaler = func.HydroScaler(attrLst=attrLst, seriesLst=forcing_lst, xNanFill=0.0, log_norm_cols=log_norm_cols)
+
+attri_norm, xTrain_norm = scaler.fit_transform(train_c, xTrain.copy())
+attri_norm[np.isnan(attri_norm)] = 0.0
+xTrain_norm[np.isnan(xTrain_norm)] = 0.0
+
+attri_norm = np.expand_dims(attri_norm, axis=1)
+attri_norm = np.repeat(attri_norm, xTrain_norm.shape[1], axis=1)
+data_norm = np.concatenate((xTrain_norm, attri_norm), axis=-1)
+
+
+#Hyperparameters
+bs = 100 ##batch size
+nS = 5 ## number of state variables
+nEpoch = 50 ##number of epochs
+alpha = 0.25 ## alpha in the loss function (weight of the log-sqrt RMSE term)
+rho = 365 ## time length (days) of each training minibatch
+buffTime = 365 ##length of warmup period
+delta_t = torch.tensor(1.0).to(device = device,dtype = dtype) ## Time step (one day)
+
+nflux = 1 ## Number of target fluxes; only one: streamflow
+
+
+ninv = data_norm.shape[-1]
+## The new structure has 15 parameters
+nfea = 15 ## number of physical parameters in the hydrological model
+routn = 15 ## Length of the routing window
+nmul = 16 ## number of components
+hiddeninv = 256 ## hidden size of NN
+drinv = 0.5 ## dropout rate
+model = lstmModel_module.lstmModel(ninv, nfea, nmul, hiddeninv, drinv) ## Initialize the NN (LSTM)
+tdlst = [1,2, 13] ## 1-based indices of the dynamic (time-varying) parameters
+tdRepS = [str(ix) for ix in tdlst]
+
+startEpoch = 0 ## Start from epoch 0
+rerun = False ## Switch to resume training from the latest saved checkpoint
+
+if rerun:
+
+    weights_filenames = []
+    for f in glob.glob(saveFolder + "model_Ep*"):
+        # keep only the file name, so the epoch number is the first digit group
+        weights_filenames.append(os.path.basename(f))
+        print(re.split(r"(\d+)", os.path.basename(f)))
+    weights_filenames.sort(key=lambda x: int(re.split(r"(\d+)", x)[1]))
+
+
+    path_model = saveFolder + weights_filenames[-1]
+    print("Reading ", path_model)
+    model = torch.load(path_model, map_location=f"cuda:{gpuid}")
+    startEpoch = int(re.split(r"(\d+)", weights_filenames[-1])[1]) + 1
+
+if not os.path.exists(saveFolder):
+    os.mkdir(saveFolder)
+
+model_name = "HBV_Module_add_cr"
+
+
+train_module.trainModel(xTrain,
+ streamflow_data,
+ data_norm,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ alpha,
+ tdlst,
+ startEpoch=startEpoch,
+ nEpoch=nEpoch,
+ miniBatch=[bs, rho],
+ buffTime=buffTime,
+ saveFolder=saveFolder,
+ routn=routn,
+ model_name=model_name
+ )
+
+
+## Model validation
+## Load the trained model
+testepoch = 50
+testbs = 200 #batchsize for testing
+model_file = saveFolder + f'/model_Ep{testepoch}.pt'
+print("Reading ", model_file)
+model = torch.load(model_file, map_location=f"cuda:{gpuid}")
+
+
+validation_file = datapath+'validation_file'
+with open(validation_file, 'rb') as g:
+ val_x, val_y, val_c = pickle.load(g)
+
+basinarea_val = val_c[:,np.where(np.array(attrLst)=='area_gages2')[0]]
+
+streamflow_data_val = func.basinNorm_mmday(val_y, basinarea_val, toNorm=True)
+
+
+data_PET_val = data_PET_all[:,time_PET.get_loc('1995-10-01'):time_PET.get_loc('2010-09-30')+1]
+xTrain_val = np.concatenate((val_x,np.expand_dims(data_PET_val,axis=-1)),axis = -1)
+
+attri_norm_val = scaler.transform(val_c, attrLst)
+xTrain_norm_val = scaler.transform(xTrain_val, forcing_lst)
+attri_norm_val[np.isnan(attri_norm_val)] = 0.0
+xTrain_norm_val[np.isnan(xTrain_norm_val)] = 0.0
+
+attri_norm_val= np.expand_dims(attri_norm_val, axis=1)
+attri_norm_val = np.repeat(attri_norm_val, xTrain_norm_val.shape[1], axis=1)
+data_norm_val = np.concatenate((xTrain_norm_val, attri_norm_val), axis=-1)
+
+warmuplength = 730 ## Use 2 years of training data to warm up the simulation in validation
+xTrain_val = np.concatenate((xTrain[:,-warmuplength:,:], xTrain_val), axis= 1)
+data_norm_val = np.concatenate((data_norm[:,-warmuplength:,:], data_norm_val), axis= 1)
+
+## Reference benchmark arrays (loaded here but not used further in this script)
+y = np.load("/data/yxs275/NROdeSolver/CAMELSData/obs_test.npy")
+x = np.load("/data/yxs275/NROdeSolver/CAMELSData/HBV_test.npy")[:,-y.shape[1]:,:]
+z = np.load("/data/yxs275/NROdeSolver/CAMELSData/LSTM_test.npy")[:,-y.shape[1]:,:]
+
+
+test_module.testModel(xTrain_val,
+ streamflow_data_val,
+ data_norm_val,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ tdlst,
+ bs = testbs,
+ saveFolder=saveFolder,
+ routn=routn,
+ model_name=model_name
+ )
diff --git a/nnModel/crit.py b/nnModel/crit.py
new file mode 100644
index 0000000..22dcbc3
--- /dev/null
+++ b/nnModel/crit.py
@@ -0,0 +1,555 @@
+import torch
+import numpy as np
+import math
+
+
+class SigmaLoss(torch.nn.Module):
+ def __init__(self, prior='gauss'):
+ super(SigmaLoss, self).__init__()
+ self.reduction = 'elementwise_mean'
+ if prior == '':
+ self.prior = None
+ else:
+ self.prior = prior.split('+')
+
+ def forward(self, output, target):
+ ny = target.shape[-1]
+ lossMean = 0
+ for k in range(ny):
+ p0 = output[:, :, k * 2]
+ s0 = output[:, :, k * 2 + 1]
+ t0 = target[:, :, k]
+ mask = t0 == t0
+ p = p0[mask]
+ s = s0[mask]
+ t = t0[mask]
+ if self.prior[0] == 'gauss':
+ loss = torch.exp(-s).mul((p - t)**2) / 2 + s / 2
+ elif self.prior[0] == 'invGamma':
+ c1 = float(self.prior[1])
+ c2 = float(self.prior[2])
+ nt = p.shape[0]
+ loss = torch.exp(-s).mul(
+ (p - t)**2 + c2 / nt) / 2 + (1 / 2 + c1 / nt) * s
+ lossMean = lossMean + torch.mean(loss)
+ return lossMean
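+# Note on SigmaLoss: the network predicts a pair (p, s) per target, where s is
+# interpreted as log(sigma^2). The 'gauss' branch above is then the Gaussian
+# negative log-likelihood up to an additive constant:
+#   NLL = (p - t)^2 / (2*sigma^2) + (1/2)*log(sigma^2) = exp(-s)*(p - t)^2/2 + s/2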
+
+
+class RmseLoss(torch.nn.Module):
+ def __init__(self):
+ super(RmseLoss, self).__init__()
+
+ def forward(self, output, target):
+ ny = target.shape[2]
+ loss = 0
+ for k in range(ny):
+ p0 = output[:, :, k]
+ t0 = target[:, :, k]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ temp = torch.sqrt(((p - t)**2).mean())
+ loss = loss + temp
+ return loss
+
+class RmseLossRunoff(torch.nn.Module):
+ def __init__(self):
+ super(RmseLossRunoff, self).__init__()
+
+ def forward(self, output, target):
+ ny = target.shape[2]
+ loss = 0
+ for k in range(ny):
+ p0 = torch.log10(torch.sqrt(output[:, :, k]) + 0.1)
+ t0 = torch.log10(torch.sqrt(target[:, :, k]) + 0.1)
+ # p0 = torch.sqrt(output[:, :, k])
+ # t0 = torch.sqrt(target[:, :, k])
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ temp = torch.sqrt(((p - t)**2).mean())
+ loss = loss + temp
+ return loss
+
+class RmseLossComb(torch.nn.Module):
+ def __init__(self, alpha, beta=1e-6):
+ super(RmseLossComb, self).__init__()
+ self.alpha = alpha # weights of log-sqrt RMSE
+ self.beta = beta
+
+ def forward(self, output, target):
+ ny = target.shape[2]
+ loss = 0
+ for k in range(ny):
+ p0 = output[:, :, k]
+ t0 = target[:, :, k]
+ p1 = torch.log10(torch.sqrt(output[:, :, k]+self.beta) + 0.1)
+ t1 = torch.log10(torch.sqrt(target[:, :, k]+self.beta) + 0.1)
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ loss1 = torch.sqrt(((p - t)**2).mean()) # RMSE item
+ mask1 = t1 == t1
+ pa = p1[mask1]
+ ta = t1[mask1]
+ # pa = torch.log10(torch.sqrt(p) + 0.1)
+ # ta = torch.log10(torch.sqrt(t) + 0.1)
+ loss2 = torch.sqrt(((pa - ta)**2).mean()) #Log-Sqrt RMSE item
+ temp = (1.0-self.alpha)*loss1 + self.alpha*loss2
+ loss = loss + temp
+ return loss
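+# A minimal usage sketch (hypothetical shapes): inputs are time-major tensors of
+# shape (ntime, nbatch, nvar); alpha blends the plain RMSE with the low-flow
+# oriented log-sqrt RMSE term:
+#   lossFun = RmseLossComb(alpha=0.25)
+#   loss = lossFun(torch.rand(365, 8, 1), torch.rand(365, 8, 1))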
+
+
+
+class RmseLossCNN(torch.nn.Module):
+ def __init__(self):
+ super(RmseLossCNN, self).__init__()
+
+ def forward(self, output, target):
+ # output = ngrid * nvar * ntime
+ ny = target.shape[1]
+ loss = 0
+ for k in range(ny):
+ p0 = output[:, k, :]
+ t0 = target[:, k, :]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ temp = torch.sqrt(((p - t)**2).mean())
+ loss = loss + temp
+ return loss
+
+class RmseLossANN(torch.nn.Module):
+ def __init__(self, get_length=False):
+ super(RmseLossANN, self).__init__()
+ self.ind = get_length
+
+ def forward(self, output, target):
+ if len(output.shape) == 2:
+ p0 = output[:, 0]
+ t0 = target[:, 0]
+ else:
+ p0 = output[:, :, 0]
+ t0 = target[:, :, 0]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ loss = torch.sqrt(((p - t)**2).mean())
+ if self.ind is False:
+ return loss
+ else:
+ Nday = p.shape[0]
+ return loss, Nday
+
+class ubRmseLoss(torch.nn.Module):
+ def __init__(self):
+ super(ubRmseLoss, self).__init__()
+
+ def forward(self, output, target):
+ ny = target.shape[2]
+ loss = 0
+ for k in range(ny):
+ p0 = output[:, :, k]
+ t0 = target[:, :, k]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ pmean = p.mean()
+ tmean = t.mean()
+ p_ub = p-pmean
+ t_ub = t-tmean
+ temp = torch.sqrt(((p_ub - t_ub)**2).mean())
+ loss = loss + temp
+ return loss
+
+class MSELoss(torch.nn.Module):
+ def __init__(self):
+ super(MSELoss, self).__init__()
+
+ def forward(self, output, target):
+ ny = target.shape[2]
+ loss = 0
+ for k in range(ny):
+ p0 = output[:, :, k]
+ t0 = target[:, :, k]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ temp = ((p - t)**2).mean()
+ loss = loss + temp
+ return loss
+
+class NSELoss(torch.nn.Module):
+ def __init__(self):
+ super(NSELoss, self).__init__()
+
+ def forward(self, output, target):
+ Ngage = target.shape[1]
+ losssum = 0
+ nsample = 0
+ for ii in range(Ngage):
+ p0 = output[:, ii, 0]
+ t0 = target[:, ii, 0]
+ mask = t0 == t0
+            if mask.any():
+ p = p0[mask]
+ t = t0[mask]
+ tmean = t.mean()
+ SST = torch.sum((t - tmean) ** 2)
+ if SST != 0:
+ SSRes = torch.sum((t - p) ** 2)
+ temp = 1 - SSRes / SST
+ losssum = losssum + temp
+ nsample = nsample +1
+ # minimize the opposite average NSE
+ loss = -(losssum/nsample)
+ return loss
+
+class NSELosstest(torch.nn.Module):
+ # Same as Fredrick 2019
+ def __init__(self):
+ super(NSELosstest, self).__init__()
+
+ def forward(self, output, target):
+ Ngage = target.shape[1]
+ losssum = 0
+ nsample = 0
+ for ii in range(Ngage):
+ p0 = output[:, ii, 0]
+ t0 = target[:, ii, 0]
+ mask = t0 == t0
+            if mask.any():
+ p = p0[mask]
+ t = t0[mask]
+ tmean = t.mean()
+ SST = torch.sum((t - tmean) ** 2)
+ SSRes = torch.sum((t - p) ** 2)
+ # temp = SSRes / ((torch.sqrt(SST)+0.1)**2)
+ temp = SSRes / (SST+0.1)
+ losssum = losssum + temp
+ nsample = nsample +1
+ loss = losssum/nsample
+ return loss
+
+class NSELossBatch(torch.nn.Module):
+ # Same as Fredrick 2019, batch NSE loss
+ # stdarray: the standard deviation of the runoff for all basins
+ def __init__(self, stdarray, eps=0.1):
+ super(NSELossBatch, self).__init__()
+ self.std = stdarray
+ self.eps = eps
+
+ def forward(self, output, target, igrid):
+ nt = target.shape[0]
+ stdse = np.tile(self.std[igrid], (nt, 1))
+ stdbatch = torch.tensor(stdse, requires_grad=False).float().cuda()
+ p0 = output[:, :, 0] # dim: Time*Gage
+ t0 = target[:, :, 0]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ stdw = stdbatch[mask]
+ sqRes = (p - t)**2
+ normRes = sqRes / (stdw + self.eps)**2
+ loss = torch.mean(normRes)
+
+ # sqRes = (t0 - p0)**2 # squared error
+ # normRes = sqRes / (stdbatch + self.eps)**2
+ # mask = t0 == t0
+ # loss = torch.mean(normRes[mask])
+ return loss
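+# In effect, per element: loss = mean_i ((p_i - t_i)^2 / (std_basin(i) + eps)^2),
+# which for small eps approximates the batch mean of (1 - NSE) across basins.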
+
+class NSESqrtLossBatch(torch.nn.Module):
+ # Same as Fredrick 2019, batch NSE loss, use RMSE and STD instead
+ # stdarray: the standard deviation of the runoff for all basins
+ def __init__(self, stdarray, eps=0.1):
+ super(NSESqrtLossBatch, self).__init__()
+ self.std = stdarray
+ self.eps = eps
+
+ def forward(self, output, target, igrid):
+ nt = target.shape[0]
+ stdse = np.tile(self.std[igrid], (nt, 1))
+ stdbatch = torch.tensor(stdse, requires_grad=False).float().cuda()
+ p0 = output[:, :, 0] # dim: Time*Gage
+ t0 = target[:, :, 0]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ stdw = stdbatch[mask]
+ sqRes = torch.sqrt((p - t)**2)
+ normRes = sqRes / (stdw + self.eps)
+ loss = torch.mean(normRes)
+
+ # sqRes = (t0 - p0)**2 # squared error
+ # normRes = sqRes / (stdbatch + self.eps)**2
+ # mask = t0 == t0
+ # loss = torch.mean(normRes[mask])
+ return loss
+
+class TrendLoss(torch.nn.Module):
+ # Add the trend part to the loss
+ def __init__(self):
+ super(TrendLoss, self).__init__()
+
+ def getSlope(self, x):
+ idx = 0
+ n = len(x)
+ d = torch.ones(int(n * (n - 1) / 2))
+
+ for i in range(n - 1):
+ j = torch.arange(start=i + 1, end=n)
+ d[idx: idx + len(j)] = (x[j] - x[i]) / (j - i).type(torch.float)
+ idx = idx + len(j)
+
+ return torch.median(d)
+
+ def forward(self, output, target, PercentLst=[100, 98, 50, 30, 2]):
+ # output, target: rho/time * Batchsize * Ntraget_var
+ ny = target.shape[2]
+ nt = target.shape[0]
+ ngage = target.shape[1]
+ loss = 0
+ for k in range(ny):
+ # loop for variable
+ p0 = output[:, :, k]
+ t0 = target[:, :, k]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ # first part loss, regular RMSE
+ temp = torch.sqrt(((p - t)**2).mean())
+ loss = loss + temp
+ temptrendloss = 0
+ nsample = 0
+ for ig in range(ngage):
+ # loop for basins
+ pgage0 = p0[:, ig].reshape(-1, 365)
+ tgage0 = t0[:, ig].reshape(-1, 365)
+            gBool = np.zeros(tgage0.shape[0], dtype=bool)  # boolean mask, so gBool indexing below selects QC-passing years
+ pgageM = torch.zeros(tgage0.shape[0])
+ pgageQ = torch.zeros(tgage0.shape[0], len(PercentLst))
+ tgageM = torch.zeros(tgage0.shape[0])
+ tgageQ = torch.zeros(tgage0.shape[0], len(PercentLst))
+ for ii in range(tgage0.shape[0]):
+ pgage = pgage0[ii, :]
+ tgage = tgage0[ii, :]
+ maskg = tgage == tgage
+ # quality control
+ if maskg.sum() > (1-2/12)*365:
+                    gBool[ii] = True
+ pgage = pgage[maskg]
+ tgage = tgage[maskg]
+ pgageM[ii] = pgage.mean()
+ tgageM[ii] = tgage.mean()
+ for ip in range(len(PercentLst)):
+ k = math.ceil(PercentLst[ip] / 100 * 365)
+ # pgageQ[ii, ip] = torch.kthvalue(pgage, k)[0]
+ # tgageQ[ii, ip] = torch.kthvalue(tgage, k)[0]
+ pgageQ[ii, ip] = torch.sort(pgage)[0][k-1]
+ tgageQ[ii, ip] = torch.sort(tgage)[0][k-1]
+ # Quality control
+ if gBool.sum()>6:
+ nsample = nsample + 1
+ pgageM = pgageM[gBool]
+ tgageM = tgageM[gBool]
+ # mean annual trend loss
+ temptrendloss = temptrendloss + (self.getSlope(tgageM)-self.getSlope(pgageM))**2
+ pgageQ = pgageQ[gBool, :]
+ tgageQ = tgageQ[gBool, :]
+ # quantile trend loss
+ for ii in range(tgageQ.shape[1]):
+ temptrendloss = temptrendloss + (self.getSlope(tgageQ[:, ii])-self.getSlope(pgageQ[:, ii]))**2
+
+ loss = loss + temptrendloss/nsample
+
+ return loss
+
+
+class ModifyTrend(torch.nn.Module):
+ # Add the trend part to the loss
+ def __init__(self):
+ super(ModifyTrend, self).__init__()
+
+ def getSlope(self, x):
+ nyear, ngage = x.shape
+ # define difference matirx
+ x = x.transpose(0,1)
+ xtemp = x.repeat(1, nyear)
+ xi = xtemp.reshape([ngage, nyear, nyear])
+ xj = xi.transpose(1,2)
+ # define i,j matrix
+ im = torch.arange(nyear).repeat(nyear).reshape(nyear,nyear).type(torch.float)
+ im = im.unsqueeze(0).repeat([ngage, 1, 1])
+ jm = im.transpose(1,2)
+ delta = 1.0/(im - jm)
+ delta = delta.cuda()
+ # calculate the slope matrix
+ slopeMat = (xi - xj)*delta
+ rid, cid = np.triu_indices(nyear, k=1)
+ slope = slopeMat[:, rid, cid]
+ senslope = torch.median(slope, dim=-1)[0]
+
+ return senslope
+
+
+ def forward(self, output, target, PercentLst=[-1]):
+ # output, target: rho/time * Batchsize * Ntraget_var
+ # PercentLst = [100, 98, 50, 30, 2, -1]
+ ny = target.shape[2]
+ nt = target.shape[0]
+ ngage = target.shape[1]
+ # loop for variable
+ p0 = output[:, :, 0]
+ t0 = target[:, :, 0]
+ mask = t0 == t0
+ p = p0[mask]
+ t = t0[mask]
+ # first part loss, regular RMSE
+ # loss = torch.sqrt(((p - t)**2).mean())
+ # loss = ((p - t) ** 2).mean()
+ loss = 0
+ temptrendloss = 0
+ # second loss: adding trend
+ p1 = p0.reshape(-1, 365, ngage)
+ t1 = t0.reshape(-1, 365, ngage)
+ for ip in range(len(PercentLst)):
+ k = math.ceil(PercentLst[ip] / 100 * 365)
+ # pQ = torch.kthvalue(p1, k, dim=1)[0]
+ # tQ = torch.kthvalue(t1, k, dim=1)[0]
+ # output: dim=Year*gage
+ if PercentLst[ip]<0:
+ pQ = torch.mean(p1, dim=1)
+ tQ = torch.mean(t1, dim=1)
+ else:
+ pQ = torch.sort(p1, dim=1)[0][:, k - 1, :]
+ tQ = torch.sort(t1, dim=1)[0][:, k - 1, :]
+ # temptrendloss = temptrendloss + ((self.getSlope(pQ) - self.getSlope(tQ)) ** 2).mean()
+ temptrendloss = temptrendloss + ((pQ - tQ) ** 2).mean()
+ loss = loss + temptrendloss
+
+ return loss
+
+
+class ModifyTrend1(torch.nn.Module):
+ # Add the trend part to the loss
+ def __init__(self):
+ super(ModifyTrend1, self).__init__()
+
+ def getM(self, n):
+ M = np.zeros([n**2, n])
+ s0 = np.zeros([n**2, 1])
+ for j in range (n):
+ for i in range(n):
+ k = j*n+i
+ # ... (diff truncated here: the remainder of crit.py and the header of the
+ # cuDNN LSTM module file are missing; the lines below belong to that LSTM
+ # forward()'s dropout logic)
+ if self.dr > 0 and (doDropMC is True or self.training is True):
+ doDrop = True
+ else:
+ doDrop = False
+
+ batchSize = input.size(1)
+
+ if hx is None:
+ hx = input.new_zeros(
+ 1, batchSize, self.hiddenSize, requires_grad=False)
+ if cx is None:
+ cx = input.new_zeros(
+ 1, batchSize, self.hiddenSize, requires_grad=False)
+
+ # cuDNN backend - disabled flat weight
+ # handle = torch.backends.cudnn.get_handle()
+ if doDrop is True:
+ self.reset_mask()
+ weight = [
+ DropMask.apply(self.w_ih, self.maskW_ih, True),
+ DropMask.apply(self.w_hh, self.maskW_hh, True), self.b_ih,
+ self.b_hh
+ ]
+ else:
+ weight = [self.w_ih, self.w_hh, self.b_ih, self.b_hh]
+
+ # output, hy, cy, reserve, new_weight_buf = torch._cudnn_rnn(
+ # input, weight, 4, None, hx, cx, torch.backends.cudnn.CUDNN_LSTM,
+ # self.hiddenSize, 1, False, 0, self.training, False, (), None)
+ output, hy, cy, reserve, new_weight_buf = torch._C._VariableFunctions._cudnn_rnn(
+ input, weight, 4, None, hx, cx, 2, # 2 means LSTM
+ self.hiddenSize, 0, 1, False, 0, self.training, False, (), None)
+
+ return output, (hy, cy)
+
+ @property
+ def all_weights(self):
+ return [[getattr(self, weight) for weight in weights]
+ for weights in self._all_weights]
+
diff --git a/nnModel/test_module.py b/nnModel/test_module.py
new file mode 100644
index 0000000..5d210c6
--- /dev/null
+++ b/nnModel/test_module.py
@@ -0,0 +1,200 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+import time
+import os
+import importlib
+from HydroDLAdj.nonlinearSolver.MOL import MOL
+from HydroDLAdj.utils import rout
+from HydroDLAdj.post import plot, stat
+device = torch.device("cuda")
+dtype=torch.float32
+
+
+def testModel( x,
+ y,
+ z,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ tdlst,
+ bs = 30,
+ saveFolder= None,
+ routn = 15,
+ dydrop = 0.0,
+ routDy = False,
+ model_name = "HBV_Module"
+ ):
+
+
+ package_name = "HydroDLAdj.HydroModels"
+ model_import_string = f"{package_name}.{model_name}"
+
+    try:
+        PBMmodel = getattr(importlib.import_module(model_import_string), model_name)
+    except (ImportError, AttributeError):
+        print(f"Failed to import {model_name} from {package_name}")
+        raise
+
+
+ ngrid, nt, nx = x.shape
+ model = model.cuda()
+ model.train(mode=False)
+ iS = np.arange(0, ngrid, bs)
+ iE = np.append(iS[1:], ngrid)
+    routscaLst = [[0, 2.9], [0, 6.5]]  # scaling ranges for the two routing parameters (a, b)
+ # forward for each batch
+ for i in range(0, len(iS)):
+ bs = iE[i] - iS[i]
+ print('batch {}'.format(i))
+ xTemp = x[iS[i]:iE[i], :, :]
+
+
+ xTest = torch.from_numpy( np.swapaxes(xTemp, 1, 0)).float()
+ xTest = xTest.cuda()
+ zTemp = z[iS[i]:iE[i], :, :]
+ zTest = torch.from_numpy(np.swapaxes(zTemp, 1, 0)).float()
+ zTest = zTest.cuda()
+
+
+ hbvpara, routpara = model(zTest) ## LSTM
+ bsnew = bs * nmul
+ if nmul == 1:
+ hbvpara = hbvpara.view(nt, bsnew, nfea)
+ else:
+
+ hbvpara = hbvpara.view(nt, bs, nfea,nmul)
+ hbvpara = hbvpara.permute([0,3,1,2])
+ hbvpara = hbvpara.reshape(nt, bsnew, nfea)
+
+ if routDy is True:
+ routpara = routpara.view(nt, bs, 2,nmul)
+ routpara = routpara.permute([0,3,1,2])
+ routpara = routpara.reshape(nt, bsnew, 2)
+
+
+
+ xTest = xTest.unsqueeze(1).repeat([1,nmul,1,1])
+
+
+ xTest = xTest.view(nt, bsnew, -1)
+ y0 = torch.zeros((bsnew, nS)).to(device) # bs*ny
+
+ fluxSolution_new = torch.zeros((nt, bsnew, nflux)).to(y0)
+ fluxSolution_q0 = torch.zeros((nt, bsnew, nflux)).to(y0)
+ fluxSolution_q1 = torch.zeros((nt, bsnew, nflux)).to(y0)
+ fluxSolution_q2 = torch.zeros((nt, bsnew, nflux)).to(y0)
+ fluxSolution_ET = torch.zeros((nt, bsnew, nflux)).to(y0)
+ # hbv_para = params[:,:nfea*nmul].detach().requires_grad_(True)
+
+
+
+ f = PBMmodel(xTest, nfea)
+
+ M = MOL(f, nS, nflux, nt, bsDefault=bsnew, mtd=0, dtDefault=delta_t,eval = True)
+
+ parstaFull = hbvpara[-1, :, :].unsqueeze(0).repeat([nt, 1, 1]) # static matrix
+ parhbvFull = torch.clone(parstaFull)
+ pmat = torch.ones([1, bsnew]) * dydrop
+ for ix in tdlst:
+ staPar = parstaFull[:, :, ix - 1]
+ dynPar = hbvpara[:, :, ix-1]
+ drmask = torch.bernoulli(pmat).detach_().cuda() # to drop some dynamic parameters as static
+ comPar = dynPar * (1 - drmask) + staPar * drmask
+ parhbvFull[:, :, ix - 1] = comPar
+
+ time0 = time.time()
+ ySolution = M.nsteps_pDyn(parhbvFull, y0)
+ print("nt ", nt)
+ print("Time: ", time.time() - time0)
+ for day in range(0, nt):
+ flux,flux_q0,flux_q1,flux_q2,flux_et = f(ySolution[day, :, :], parhbvFull[day, :, :], day, returnFlux = True)
+ fluxSolution_new[day, :, :] = flux * delta_t
+ fluxSolution_q0[day, :, :] = flux_q0 * delta_t
+ fluxSolution_q1[day, :, :] = flux_q1 * delta_t
+ fluxSolution_q2[day, :, :] = flux_q2 * delta_t
+ fluxSolution_ET[day, :, :] = flux_et * delta_t
+
+ if nmul > 1 and routDy is not True:
+ fluxSolution_new = fluxSolution_new.view(nt, nmul, -1, nflux)
+ fluxSolution_new = fluxSolution_new.mean(dim=1)
+ fluxSolution_q0 = fluxSolution_q0.view(nt, nmul, -1, nflux)
+ fluxSolution_q0 = fluxSolution_q0.mean(dim=1)
+ fluxSolution_q1 = fluxSolution_q1.view(nt, nmul, -1, nflux)
+ fluxSolution_q1 = fluxSolution_q1.mean(dim=1)
+ fluxSolution_q2 = fluxSolution_q2.view(nt, nmul, -1, nflux)
+ fluxSolution_q2 = fluxSolution_q2.mean(dim=1)
+ fluxSolution_ET = fluxSolution_ET.view(nt, nmul, -1, nflux)
+ fluxSolution_ET = fluxSolution_ET.mean(dim=1)
+
+ routa = routscaLst[0][0] + routpara[-1,:, 0] * (routscaLst[0][1] - routscaLst[0][0])
+ routb = routscaLst[1][0] + routpara[-1,:, 1] * (routscaLst[1][1] - routscaLst[1][0])
+ routa = routa.repeat(nt, 1).unsqueeze(-1)
+ routb = routb.repeat(nt, 1).unsqueeze(-1)
+        UH = rout.UH_gamma(routa, routb, lenF=routn)  # lenF: length of the unit-hydrograph filter (routing window)
+ rf = fluxSolution_new.permute([1, 2, 0]) # dim:gage*var*time
+ UH = UH.permute([1, 2, 0]) # dim: gage*var*time
+ Qsrout = rout.UH_conv(rf, UH).permute([2, 0, 1])
+
+ if nmul > 1 and routDy is True:
+ Qsrout = Qsrout.view(nt, nmul, -1, nflux)
+ Qsrout = Qsrout.mean(dim=1)
+ Qsrout = Qsrout.detach().cpu().numpy().swapaxes(0, 1)
+ ySolution = ySolution.view(nt, nmul, -1,nS).mean(dim=1)
+ SOut = ySolution.detach().cpu().numpy().swapaxes(0, 1)
+ Q0 = fluxSolution_q0.detach().cpu().numpy().swapaxes(0, 1)
+ Q1 = fluxSolution_q1.detach().cpu().numpy().swapaxes(0, 1)
+ Q2 = fluxSolution_q2.detach().cpu().numpy().swapaxes(0, 1)
+ ET = fluxSolution_ET.detach().cpu().numpy().swapaxes(0, 1)
+ if i == 0:
+ yOut = Qsrout
+ Spred = SOut
+ yQ0 = Q0
+ yQ1 = Q1
+ yQ2 = Q2
+ yET = ET
+ else:
+ yOut = np.concatenate((yOut, Qsrout), axis=0)
+ Spred = np.concatenate((Spred, SOut), axis=0)
+ yQ0 = np.concatenate((yQ0, Q0), axis=0)
+ yQ1 = np.concatenate((yQ1, Q1), axis=0)
+ yQ2 = np.concatenate((yQ2, Q2), axis=0)
+ yET = np.concatenate((yET, ET), axis=0)
+ model.zero_grad()
+ torch.cuda.empty_cache()
+
+
+ evaDict = [stat.statError( yOut[:, -y.shape[1]:, 0],y[:, :, 0])]
+ np.save(saveFolder + 'yOut.npy', yOut[:, -y.shape[1]:, 0])
+ np.save(saveFolder + 'Spred.npy', Spred[:, -y.shape[1]:, :])
+ np.save(saveFolder + 'Q0.npy', yQ0[:, -y.shape[1]:, 0])
+ np.save(saveFolder + 'Q1.npy', yQ1[:, -y.shape[1]:, 0])
+ np.save(saveFolder + 'Q2.npy', yQ2[:, -y.shape[1]:, 0])
+ np.save(saveFolder + 'ET.npy', yET[:, -y.shape[1]:, 0])
+ ## Show boxplots of the results
+ evaDictLst = evaDict
+ keyLst = ['NSE', 'KGE','FLV','FHV','PBiasother', 'lowRMSE', 'highRMSE','midRMSE']
+ dataBox = list()
+ for iS in range(len(keyLst)):
+ statStr = keyLst[iS]
+ temp = list()
+ for k in range(len(evaDictLst)):
+ data = evaDictLst[k][statStr]
+ data = data[~np.isnan(data)]
+ temp.append(data)
+ dataBox.append(temp)
+
+ print("NSE,KGE,'PBiaslow','PBiashigh','PBiasother', mean lowRMSE, highRMSE, and midRMSE of all basins in testing period: ", np.nanmedian(dataBox[0][0]),
+ np.nanmedian(dataBox[1][0]), np.nanmedian(dataBox[2][0]), np.nanmedian(dataBox[3][0]),
+ np.nanmedian(dataBox[4][0]), np.nanmean(dataBox[5][0]),np.nanmean(dataBox[6][0]), np.nanmean(dataBox[7][0]))
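+    # The arrays saved above can be inspected offline after a run, e.g.:
+    #   yOut = np.load(saveFolder + 'yOut.npy')    # routed streamflow predictions
+    #   Spred = np.load(saveFolder + 'Spred.npy')  # simulated state variables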
+
+
+
+ return
+
+
+
+
diff --git a/nnModel/train_module.py b/nnModel/train_module.py
new file mode 100644
index 0000000..3097ca1
--- /dev/null
+++ b/nnModel/train_module.py
@@ -0,0 +1,221 @@
+import torch
+import numpy as np
+import time
+import os
+import importlib
+from HydroDLAdj.nonlinearSolver.MOL import MOL
+from HydroDLAdj.nnModel import crit
+from HydroDLAdj.utils import rout
+device = torch.device("cuda")
+dtype=torch.float32
+
+
+def trainModel(x,
+ y,
+ z_norm,
+ nS,
+ nflux,
+ nfea,
+ nmul,
+ model,
+ delta_t,
+ alpha,
+ tdlst,
+ startEpoch=1,
+ nEpoch=50,
+ miniBatch=[100, 365],
+ buffTime=0,
+ saveFolder=None,
+ routn=15,
+ dydrop=0.0,
+ routDy = False,
+ model_name = "HBV_Module"
+ ):
+ package_name = "HydroDLAdj.HydroModels"
+ model_import_string = f"{package_name}.{model_name}"
+
+    try:
+        PBMmodel = getattr(importlib.import_module(model_import_string), model_name)
+    except (ImportError, AttributeError):
+        print(f"Failed to import {model_name} from {package_name}")
+        raise
+
+
+ lossFun = crit.RmseLossComb(alpha=alpha)
+ model = model.cuda()
+ lossFun = lossFun.cuda()
+ optimizer = torch.optim.Adadelta(model.parameters())
+ model.zero_grad()
+ bs, rho = miniBatch
+ ngrid, nt, nx = x.shape
+ nIterEp = int(
+ np.ceil(np.log(0.01) / np.log(1 - bs * (rho) / ngrid / (nt - buffTime))))
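+    # nIterEp (above) is the number of minibatches per epoch, chosen so that a
+    # given (basin, window) sample is missed with probability <= 0.01:
+    #   (1 - bs*rho / (ngrid*(nt - buffTime)))**nIterEp <= 0.01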
+ runFile = os.path.join(saveFolder, 'run.csv')
+ log_rf = open(runFile, 'a')
+    routscaLst = [[0, 2.9], [0, 6.5]]  # scaling ranges for the two routing parameters (a, b)
+ print("Start from Epoch ", startEpoch)
+ print("Routing days ", routn)
+ print("Parameters dropout ", dydrop)
+ print("Number of component ", nmul)
+ print("Dynamic parameters ", tdlst)
+ print("Rounting is Dynamic or not ", routDy)
+ #HBV_physics = HBV
+ for iEpoch in range(startEpoch, nEpoch + 1):
+ lossEp = 0
+ t0 = time.time()
+ for iIter in range(0, nIterEp):
+ tIter = time.time()
+ iGrid, iT = randomIndex(ngrid, nt, [bs, rho], bufftime=buffTime)
+ xTrain = selectSubset(x, iGrid, iT, rho, bufftime=buffTime)
+ yTrain = selectSubset(y, iGrid, iT, rho)
+ z_normTrain = selectSubset(z_norm, iGrid, iT, rho, bufftime=buffTime)
+
+ xTrain = xTrain.unsqueeze(1).repeat([1, nmul, 1, 1])
+
+ bsnew = bs * nmul
+ fluxSolution_new = torch.zeros((rho, bsnew, nflux)).cuda()
+ xTrain = xTrain.view(rho + buffTime, bsnew, -1)
+
+ y0 = torch.zeros((bsnew, nS)).to(device) # bs*ny
+
+ hbvpara, routpara = model(z_normTrain) ## LSTM
+ if nmul == 1:
+ hbvpara = hbvpara.view(rho + buffTime, bsnew, nfea)
+
+ else:
+
+ hbvpara = hbvpara.view(rho + buffTime, bs, nfea,nmul)
+ hbvpara = hbvpara.permute([0,3,1,2])
+ hbvpara = hbvpara.reshape(rho + buffTime, bsnew, nfea)
+ if routDy is True:
+ routpara = routpara.view(rho + buffTime, bs,2,nmul)
+ routpara = routpara.permute([0,3,1,2])
+ routpara = routpara.reshape(rho + buffTime, bsnew, 2)
+
+ f_warm_up = PBMmodel(xTrain[:buffTime, :, :], nfea)
+
+ M_warm_up = MOL(f_warm_up, nS, nflux, buffTime, bsDefault=bsnew, mtd=0, dtDefault=delta_t)
+
+ para_warm_up = hbvpara[buffTime - 1, :, :].unsqueeze(0).repeat([buffTime, 1, 1])
+ y_warm_up = M_warm_up.nsteps_pDyn(para_warm_up, y0)
+
+ parstaFull = hbvpara[-1, :, :].unsqueeze(0).repeat([rho, 1, 1]) # static matrix
+ parhbvFull = torch.clone(parstaFull)
+ pmat = torch.ones([1, bsnew]) * dydrop
+ for ix in tdlst:
+ staPar = parstaFull[:, :, ix - 1]
+ dynPar = hbvpara[buffTime:, :, ix - 1]
+ drmask = torch.bernoulli(pmat).detach_().cuda() # to drop some dynamic parameters as static
+ comPar = dynPar * (1 - drmask) + staPar * drmask
+ parhbvFull[:, :, ix - 1] = comPar
+ f = PBMmodel(xTrain[buffTime:, :, :], nfea)
+ M = MOL(f, nS, nflux, rho, bsDefault=bsnew, dtDefault=delta_t, mtd=0)
+ ### Newton iterations with adjoint
+ ySolution = M.nsteps_pDyn(parhbvFull, y_warm_up[-1, :, :])
+
+ tflux = time.time()
+ for day in range(0, rho):
+ _, flux = f(ySolution[day, :, :], parhbvFull[day, :, :], day)
+ fluxSolution_new[day, :, :] = flux * delta_t
+ if nmul > 1 and routDy is not True:
+ fluxSolution_new = fluxSolution_new.view(rho,nmul,-1,nflux)
+ fluxSolution_new = fluxSolution_new.mean(dim=1)
+
+ routa = routscaLst[0][0] + routpara[-1, :, 0] * (routscaLst[0][1] - routscaLst[0][0])
+ routb = routscaLst[1][0] + routpara[-1, :, 1] * (routscaLst[1][1] - routscaLst[1][0])
+
+ routa = routa.repeat(rho, 1).unsqueeze(-1)
+ routb = routb.repeat(rho, 1).unsqueeze(-1)
+
+            UH = rout.UH_gamma(routa, routb, lenF=routn)  # lenF: length of the unit-hydrograph filter (routing window)
+ rf = fluxSolution_new.permute([1, 2, 0]) # dim:gage*var*time
+ UH = UH.permute([1, 2, 0]) # dim: gage*var*time
+ Qsrout = rout.UH_conv(rf, UH).permute([2, 0, 1])
+ if nmul > 1 and routDy is True:
+ Qsrout = Qsrout.view(rho, nmul, -1, nflux)
+ Qsrout = Qsrout.mean(dim=1)
+
+ loss = lossFun(Qsrout[:, :, :], yTrain[:, :, :])
+ tback = time.time()
+ loss.backward()
+ optimizer.step()
+
+ model.zero_grad()
+ lossEp = lossEp + loss.item()
+
+ if iIter % 1 == 0:
+ IterStr = 'Iter {} of {}: Loss {:.3f} total time {:.2f} fluxes time {:.2f} back time {:.2f}'.format(
+ iIter, nIterEp, loss.item(), time.time() - tIter, time.time() - tflux, time.time() - tback)
+
+ print(IterStr)
+ log_rf.write(IterStr + '\n')
+
+ # print loss
+ lossEp = lossEp / nIterEp
+
+ logStr = 'Epoch {} Loss {:.3f} time {:.2f}'.format(
+ iEpoch, lossEp,
+ time.time() - t0)
+ print(logStr)
+ log_rf.write(logStr + '\n')
+
+ modelFile = os.path.join(saveFolder, 'model_Ep' + str(iEpoch) + '.pt')
+ torch.save(model, modelFile)
+ log_rf.close()
+
+
+
+
+def selectSubset(x, iGrid, iT, rho, *, c=None, tupleOut=False, LCopt=False, bufftime=0):
+ nx = x.shape[-1]
+ nt = x.shape[1]
+ if x.shape[0] == len(iGrid): #hack
+ iGrid = np.arange(0,len(iGrid)) # hack
+ if nt <= rho:
+ iT.fill(0)
+
+ batchSize = iGrid.shape[0]
+ if iT is not None:
+ # batchSize = iGrid.shape[0]
+ xTensor = torch.zeros([rho+bufftime, batchSize, nx], requires_grad=False)
+ for k in range(batchSize):
+ temp = x[iGrid[k]:iGrid[k] + 1, np.arange(iT[k]-bufftime, iT[k] + rho), :]
+ xTensor[:, k:k + 1, :] = torch.from_numpy(np.swapaxes(temp, 1, 0))
+ else:
+ if LCopt is True:
+ # used for local calibration kernel: FDC, SMAP...
+ if len(x.shape) == 2:
+ # Used for local calibration kernel as FDC
+ # x = Ngrid * Ntime
+ xTensor = torch.from_numpy(x[iGrid, :]).float()
+ elif len(x.shape) == 3:
+ # used for LC-SMAP x=Ngrid*Ntime*Nvar
+ xTensor = torch.from_numpy(np.swapaxes(x[iGrid, :, :], 1, 2)).float()
+ else:
+ # Used for rho equal to the whole length of time series
+ xTensor = torch.from_numpy(np.swapaxes(x[iGrid, :, :], 1, 0)).float()
+ rho = xTensor.shape[0]
+ if c is not None:
+ nc = c.shape[-1]
+ temp = np.repeat(
+ np.reshape(c[iGrid, :], [batchSize, 1, nc]), rho+bufftime, axis=1)
+ cTensor = torch.from_numpy(np.swapaxes(temp, 1, 0)).float()
+
+ if (tupleOut):
+ if torch.cuda.is_available():
+ xTensor = xTensor.cuda()
+ cTensor = cTensor.cuda()
+ out = (xTensor, cTensor)
+ else:
+ out = torch.cat((xTensor, cTensor), 2)
+ else:
+ out = xTensor
+
+ if torch.cuda.is_available() and type(out) is not tuple:
+ out = out.cuda()
+ return out
+
+def randomIndex(ngrid, nt, dimSubset, bufftime=0):
+ batchSize, rho = dimSubset
+ iGrid = np.random.randint(0, ngrid, [batchSize])
+ iT = np.random.randint(0+bufftime, nt - rho, [batchSize])
+ return iGrid, iT
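+
+
+if __name__ == "__main__":
+    # Minimal sanity sketch of the minibatch sampling used in trainModel
+    # (hypothetical random data; shapes only, not part of the training pipeline).
+    ngrid, nt, nx = 10, 400, 3
+    x_demo = np.random.rand(ngrid, nt, nx)
+    iGrid, iT = randomIndex(ngrid, nt, [4, 100], bufftime=30)
+    batch = selectSubset(x_demo, iGrid, iT, 100, bufftime=30)
+    print(batch.shape)  # torch.Size([130, 4, 3]) -> (bufftime + rho, batch, nvar)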
diff --git a/nonlinearSolver/Jacobian.py b/nonlinearSolver/Jacobian.py
new file mode 100644
index 0000000..4754a4d
--- /dev/null
+++ b/nonlinearSolver/Jacobian.py
@@ -0,0 +1,29 @@
+import torch
+
+def batchJacobian_AD_loop(y, x, graphed=False, batchx=True):
+ if y.ndim == 1:
+ y = y.unsqueeze(1)
+ ny = y.shape[-1]
+ b = y.shape[0]
+
+ def get_vjp(v, yi):
+ grads = torch.autograd.grad(outputs=yi, inputs=x, grad_outputs=v,retain_graph=True,create_graph=graphed)
+ return grads
+
+
+ nx = x.shape[-1]
+
+ jacobian = torch.zeros(b,ny, nx).to(y)
+ for i in range(ny):
+ v = torch.ones(b).to(y)
+
+ grad = get_vjp(v, y[:,i])[0]
+ jacobian[:,i, :] = grad
+    if not batchx:
+        jacobian = jacobian.squeeze(0)  # drop the batch dimension when x is unbatched
+
+
+ if not graphed:
+ jacobian = jacobian.detach()
+
+ return jacobian
\ No newline at end of file
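+
+
+if __name__ == "__main__":
+    # Minimal sanity sketch (toy example): the batched Jacobian of y = x**2
+    # should be diag(2*x) for every sample in the batch.
+    b, n = 3, 2
+    x = torch.rand(b, n, requires_grad=True)
+    J = batchJacobian_AD_loop(x ** 2, x)
+    print(torch.allclose(J, torch.diag_embed(2 * x).detach()))  # expect: True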
diff --git a/nonlinearSolver/MOL.py b/nonlinearSolver/MOL.py
new file mode 100644
index 0000000..ed068a1
--- /dev/null
+++ b/nonlinearSolver/MOL.py
@@ -0,0 +1,54 @@
+import torch
+
+from HydroDLAdj.nonlinearSolver.NewtonSolve import NewtonSolve
+
+newtonAdj = NewtonSolve.apply
+class MOL(torch.nn.Module):
+ # Method of Lines time integrator as a nonlinear equation G(x, p, xt, t, auxG)=0.
+ # rhs is preloaded at construct and is the equation for the right hand side of the equation.
+ def __init__(self, rhsFunc,ny,nflux,rho, bsDefault =1 , mtd = 0, dtDefault=0, solveAdj = newtonAdj,eval = False):
+ super(MOL, self).__init__()
+ self.mtd = mtd # time discretization method. =0 for backward Euler
+ self.rhs = rhsFunc
+ self.delta_t = dtDefault
+ self.bs = bsDefault
+ self.ny = ny
+ self.nflux = nflux
+ self.rho = rho
+ self.solveAdj = solveAdj
+ self.eval = eval
+
+ def forward(self, x, p, xt, t, auxG): # take one step
+ # xt is x^{t}. trying to solve for x^{t+1}
+ dt, aux = auxG # expand auxiliary data
+
+ if self.mtd == 0: # backward Euler
+ rhs,_ = self.rhs(x, p, t, aux) # should return [nb,ng]
+ gg = (x - xt)/dt - rhs
+        elif self.mtd == 1: # Crank-Nicolson
+ rhs,_ = self.rhs(x, p, t, aux) # should return [nb,ng]
+ rhst,_ = self.rhs(xt, p, t, aux) # should return [nb,ng]
+ gg = (x - xt)/dt - (rhs+rhst)*0.5
+ return gg
+
+ def nsteps_pDyn(self,pDyn,x0):
+ bs = self.bs
+ ny = self.ny
+ delta_t = self.delta_t
+ rho = self.rho
+ ySolution = torch.zeros((rho,bs,ny)).to(pDyn)
+ ySolution[0,:,:] = x0
+
+ xt=x0.clone().requires_grad_()
+
+ auxG = (delta_t, None)
+
+ for t in range(rho):
+ p = pDyn[t,:,:]
+
+ x = self.solveAdj(p, xt,t, self.forward, None, auxG,True, self.eval)
+
+ ySolution[t,:,:] = x
+ xt = x
+
+ return ySolution
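+
+
+if __name__ == "__main__":
+    # Minimal sanity sketch (hypothetical toy problem, CPU): backward-Euler steps
+    # of the linear decay dx/dt = -p*x, solved through the Newton/adjoint machinery.
+    # Each implicit step gives x_{t+1} = x_t / (1 + p*dt), i.e. a factor 2/3 here.
+    def decay_rhs(x, p, t, aux=None):
+        return -p * x, None
+    rho, bs, ny = 10, 4, 1
+    pDyn = torch.full((rho, bs, 1), 0.5, requires_grad=True)
+    x0 = torch.ones(bs, ny)
+    M = MOL(decay_rhs, ny, 0, rho, bsDefault=bs, mtd=0, dtDefault=1.0)
+    ySol = M.nsteps_pDyn(pDyn, x0)
+    print(float(ySol[-1, 0, 0]), (2.0 / 3.0) ** rho)  # the two should match
+    ySol[-1].sum().backward()  # adjoint gradients w.r.t. the dynamic parameters
+    print(pDyn.grad.shape)  # torch.Size([10, 4, 1])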
diff --git a/nonlinearSolver/NewtonSolve.py b/nonlinearSolver/NewtonSolve.py
new file mode 100644
index 0000000..72700f8
--- /dev/null
+++ b/nonlinearSolver/NewtonSolve.py
@@ -0,0 +1,126 @@
+import torch
+#from HydroDLAdj.Jacobian import batchJacobian_AD
+from HydroDLAdj.nonlinearSolver.Jacobian import batchJacobian_AD_loop
+#import pydevd
+matrixSolve = torch.linalg.solve
+class NewtonSolve(torch.autograd.Function):
+ # Newton that can custom gradient tracking for two parameters: p and p2 (both Tensors)
+ # if only one is needed, set p2 to None.
+ # p2 can be, for example, x^t for Method of Lines time integrator
+ # it is a little easier than combining everything inside p
+ # if p2 is not None then G should also accept p2: G(x, p, p2, t, auxG)
+    # auxG does not track gradients, so auxG must not contain any variables influenced by p or p2!
+ @staticmethod
+ def forward(ctx, p, p2, t,G, x0=None, auxG=None, batchP=True,eval = False):
+ # batchP =True if parameters have a batch dimension
+ # with torch.no_grad():
+
+ useAD_jac=True
+ if x0 is None and p2 is not None:
+ x0 = p2
+
+ x = x0.clone().detach(); i=0;
+ max_iter=3; gtol=1e-3;
+
+ if useAD_jac:
+ torch.set_grad_enabled(True)
+
+ x.requires_grad = True #p.requires_grad = True
+
+ if p2 is None:
+ gg = G(x, p, t, auxG)
+ else:
+ gg = G(x, p, p2, t, auxG)
+
+ # dGdx = batchJacobian_AD(gg,x,graphed=True)
+ dGdx = batchJacobian_AD_loop(gg, x, graphed=True)
+            if torch.isnan(dGdx).any() or torch.isinf(dGdx).any():
+                raise RuntimeError("Jacobian dGdx contains NaN or Inf")
+ x = x.detach() # not 100% sure if needed to detach
+
+ torch.set_grad_enabled(False)
+ resnorm = torch.linalg.norm(gg, float('inf'),dim= [1]) # calculate norm of the residuals
+ resnorm0 = 100*resnorm;
+
+
+ while ((torch.max(resnorm)>gtol ) and i<=max_iter):
+ i+=1
+ if torch.max(resnorm/resnorm0) > 0.2:
+ if useAD_jac:
+ torch.set_grad_enabled(True)
+
+ x.requires_grad = True #p.requires_grad = True
+
+ if p2 is None:
+ gg = G(x, p, t, auxG)
+ else:
+ gg = G(x, p, p2, t, auxG)
+
+ #dGdx = batchJacobian_AD(gg,x,graphed=True)
+ dGdx = batchJacobian_AD_loop(gg, x, graphed=True)
+                if torch.isnan(dGdx).any() or torch.isinf(dGdx).any():
+                    raise RuntimeError("Jacobian dGdx contains NaN or Inf")
+
+ x = x.detach() # not 100% sure if needed to detach
+
+ torch.set_grad_enabled(False)
+
+ if dGdx.ndim==gg.ndim: # same dimension, must be scalar.
+ dx = (gg/dGdx).detach()
+ else:
+ dx = matrixSolve(dGdx, gg).detach()
+ x = x - dx
+ if useAD_jac:
+ torch.set_grad_enabled(True)
+ x.requires_grad = True
+ if p2 is None:
+ gg = G(x, p, t, auxG)
+ else:
+ gg = G(x, p, p2, t, auxG)
+ torch.set_grad_enabled(False)
+ resnorm0 = resnorm; ##% old resnorm
+ resnorm = torch.linalg.norm(gg, float('inf'),dim= [1]);
+
+ torch.set_grad_enabled(True)
+ x = x.detach()
+ if not eval:
+ if batchP:
+ # dGdp is needed only upon convergence.
+ if p2 is None:
+ #dGdp = batchJacobian_AD(gg, p, graphed=True); dGdp2 = None
+ dGdp = batchJacobian_AD_loop(gg, p, graphed=True);
+ dGdp2 = None
+ else:
+ # dGdp, dGdp2 = batchJacobian_AD(gg, (p,p2),graphed=True)
+ dGdp = batchJacobian_AD_loop(gg, p,graphed=True)# this one is needed only upon convergence.
+ dGdp2 = batchJacobian_AD_loop(gg, p2, graphed=True)
+            bad = torch.isnan(dGdp).any() or torch.isinf(dGdp).any()
+            if dGdp2 is not None:
+                bad = bad or torch.isnan(dGdp2).any() or torch.isinf(dGdp2).any()
+            if bad:
+                raise RuntimeError("Jacobian dGdp/dGdp2 contains NaN or Inf")
+ # it is a tuple of gradient to p and p2 separately, already detached inside
+ else:
+            raise NotImplementedError("non-batched parameter (e.g. NN weights) pathway not implemented yet")
+ # print("day ",t,"Iterations ", i)
+        ctx.save_for_backward(dGdp, dGdp2, dGdx)
+        # Saving these Jacobians spares an extra forward run in backward(); they could
+        # also be moved to the CPU. Alternatively, if memory is a problem, save x and
+        # re-evaluate G during the backward pass.
+ torch.set_grad_enabled(False)
+ del gg
+ return x
+
+ @staticmethod
+ def backward(ctx, dLdx):
+ # pydevd.settrace(suspend=False, trace_only_current_thread=True)
+ with torch.no_grad():
+ dGdp,dGdp2,dGdx = ctx.saved_tensors
+        dGdxT = torch.permute(dGdx, (0, 2, 1))
+        # Adjoint system: dG/dx^T * lambda = -dL/dx, so lambTneg stores -lambda.
+        lambTneg = matrixSolve(dGdxT, dLdx)
+        if lambTneg.ndim <= 2:
+            lambTneg = torch.unsqueeze(lambTneg, 2)
+        # Implicit-function-theorem gradient: dL/dp = lambda^T dG/dp = -lambTneg^T dG/dp
+        dLdp = -torch.bmm(torch.permute(lambTneg, (0, 2, 1)), dGdp)
+        dLdp = torch.squeeze(dLdp, 1)  # drop the singleton row dimension left by bmm
+        if dGdp2 is None:
+            dLdp2 = None
+        else:
+            dLdp2 = -torch.bmm(torch.permute(lambTneg, (0, 2, 1)), dGdp2)
+            dLdp2 = torch.squeeze(dLdp2, 1)  # drop the singleton row dimension left by bmm
+ return dLdp, dLdp2, None,None, None, None, None,None
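+
+
+if __name__ == "__main__":
+    # Minimal gradient sanity sketch (hypothetical toy system, CPU):
+    # G(x, p) = 2*x - p has the root x = p/2, so for L = sum(x) the adjoint
+    # gradient should be dL/dp = 0.5 everywhere.
+    def G_toy(x, p, t, auxG):
+        return 2.0 * x - p
+    p = torch.rand(4, 3, requires_grad=True)
+    x0 = torch.zeros_like(p)
+    x = NewtonSolve.apply(p, None, 0, G_toy, x0, None, True, False)
+    x.sum().backward()
+    print(torch.allclose(p.grad, torch.full_like(p, 0.5)))  # expect: True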
diff --git a/post/plot.py b/post/plot.py
new file mode 100644
index 0000000..2227682
--- /dev/null
+++ b/post/plot.py
@@ -0,0 +1,1082 @@
+import numpy as np
+import scipy
+import matplotlib.pyplot as plt
+import statsmodels.api as sm
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Rectangle
+import matplotlib.gridspec as gridspec
+import HydroDLAdj.utils
+import string
+# import cartopy.crs as ccrs
+# import cartopy.feature as cfeature
+
+# from mpl_toolkits import basemap  # required by plotMap/plotlocmap; uncomment if basemap is installed
+
+
+def plotBoxFig(data,
+ label1=None,
+ label2=None,
+ colorLst='rbkgcmywrbkgcmyw',
+ title=None,
+ figsize=(10, 8),
+ sharey=True,
+ xticklabel=None,
+ axin=None,
+ ylim=None,
+ ylabel=None,
+ widths=0.5,
+ ):
+ nc = len(data)
+ if axin is None:
+ fig, axes = plt.subplots(ncols=nc, sharey=sharey, figsize=figsize, constrained_layout=True)
+ else:
+ axes = axin
+
+ for k in range(0, nc):
+ ax = axes[k] if nc > 1 else axes
+ temp = data[k]
+ if type(temp) is list:
+ for kk in range(len(temp)):
+ tt = temp[kk]
+                if tt is not None and len(tt) > 0:
+ tt = tt[~np.isnan(tt)]
+ temp[kk] = tt
+ else:
+ temp[kk] = []
+ else:
+ temp = temp[~np.isnan(temp)]
+ bp = ax.boxplot(temp, patch_artist=True, notch=True, showfliers=False, widths = widths)
+ for kk in range(0, len(bp['boxes'])):
+ plt.setp(bp['boxes'][kk], facecolor=colorLst[kk])
+
+ if label1 is not None:
+ ax.set_xlabel(label1[k])
+ else:
+ ax.set_xlabel(str(k))
+ if xticklabel is None:
+ ax.set_xticks([])
+ else:
+ ax.set_xticks([y+1 for y in range(0,len(data[k]),2)])
+ ax.set_xticklabels(xticklabel)
+ # ax.ticklabel_format(axis='y', style='sci')
+ if ylabel is not None:
+ ax.set_ylabel(ylabel[k])
+ # yh = np.nanmedian(data[k][0])
+ # ax.axhline(yh, xmin=0, xmax=1, color='r',
+ # linestyle='dashed', linewidth=2)
+ # yh1 = np.nanmedian(data[k][1])
+ # ax.axhline(yh1, xmin=0, xmax=1, color='b',
+ # linestyle='dashed', linewidth=2)
+ if ylim is not None:
+ if len(ylim) > 1:
+ ax.set_ylim(ylim[k])
+ else:
+ ax.set_ylim(ylim[0])
+ if label2 is not None:
+ if nc == 1:
+ ax.legend(bp['boxes'], label2, loc='upper center', frameon=False, ncol=2, columnspacing=1.0)
+ else:
+ axes[-1].legend(bp['boxes'], label2, loc='lower center', frameon=False, ncol=2, fontsize=12)
+ if title is not None:
+ # fig.suptitle(title)
+ ax.set_title(title)
+ if axin is None:
+ return fig
+ else:
+ return ax, bp
+
+
+def plotBoxF(data,
+ label1=None,
+ label2=None,
+ colorLst='rbkgcmy',
+ title=None,
+ figsize=(10, 8),
+ sharey=True,
+ xticklabel=None,
+ ylabel=None,
+ subtitles=None
+ ):
+ nc = len(data)
+ fig, axes = plt.subplots(nrows=3, ncols=2, sharey=sharey, figsize=figsize, constrained_layout=True)
+ axes = axes.flat
+ for k in range(0, nc):
+ ax = axes[k] if nc > 1 else axes
+ # ax = axes[k]
+ bp = ax.boxplot(
+ data[k], patch_artist=True, notch=True, showfliers=False)
+ for kk in range(0, len(bp['boxes'])):
+ plt.setp(bp['boxes'][kk], facecolor=colorLst[0])
+ if k == 2:
+ yrange = ax.get_ylim()
+ if k == 3:
+ ax.set(ylim=yrange)
+ ax.axvline(len(data[k])-3+0.5, ymin=0, ymax=1, color='k',
+ linestyle='dashed', linewidth=1)
+ if ylabel[k] not in ['NSE', 'Corr', 'RMSE', 'KGE']:
+ ax.axhline(0, xmin=0, xmax=1,color='k',
+ linestyle='dashed', linewidth=1)
+
+ if label1 is not None:
+ ax.set_xlabel(label1[k])
+ if ylabel is not None:
+ ax.set_ylabel(ylabel[k])
+ if xticklabel is None:
+ ax.set_xticks([])
+ else:
+ ax.set_xticks([y+1 for y in range(0,len(data[k]))])
+ ax.set_xticklabels(xticklabel)
+ if subtitles is not None:
+ ax.set_title(subtitles[k], loc='left')
+ # ax.ticklabel_format(axis='y', style='sci')
+ if label2 is not None:
+ if nc == 1:
+ ax.legend(bp['boxes'], label2, loc='best', frameon=False, ncol=2)
+ else:
+ axes[-1].legend(bp['boxes'], label2, loc='best', frameon=False, ncol=2, fontsize=12)
+ if title is not None:
+ fig.suptitle(title)
+ return fig
+
+def plotMultiBoxFig(data,
+ *,
+ axes=None,
+ label1=None,
+ label2=None,
+ colorLst='grbkcmy',
+ title=None,
+ figsize=(10, 8),
+ sharey=True,
+ xticklabel=None,
+ position=None,
+ ylabel=None,
+ ylim = None,
+ ):
+ nc = len(data)
+ if axes is None:
+ fig, axes = plt.subplots(ncols=nc, sharey=sharey, figsize=figsize, constrained_layout=True)
+ nv = len(data[0])
+ ndays = len(data[0][1])-1
+ for k in range(0, nc):
+ ax = axes[k] if nc > 1 else axes
+ bp = [None]*nv
+ for ii in range(nv):
+ bp[ii] = ax.boxplot(
+ data[k][ii], patch_artist=True, notch=True, showfliers=False, positions=position[ii], widths=0.2)
+ for kk in range(0, len(bp[ii]['boxes'])):
+ plt.setp(bp[ii]['boxes'][kk], facecolor=colorLst[ii])
+
+ if label1 is not None:
+ ax.set_xlabel(label1[k])
+ else:
+ ax.set_xlabel(str(k))
+ if ylabel is not None:
+ ax.set_ylabel(ylabel[k])
+ if xticklabel is None:
+ ax.set_xticks([])
+ else:
+ ax.set_xticks([-0.7]+[y for y in range(0,len(data[k][1])+1)])
+ # ax.set_xticks([y for y in range(0, len(data[k][1]) + 1)])
+ # xtickloc = [0.25, 0.75] + np.arange(1.625, 5, 1.25).tolist() + [5.5, 5.5+0.25*6]
+ # ax.set_xticks([y for y in xtickloc])
+ ax.set_xticklabels(xticklabel)
+ # ax.set_xlim([0.0, 7.75])
+ ax.set_xlim([-0.9, ndays + 0.5])
+ # ax.set_xlim([-0.5, ndays + 0.5])
+ # ax.ticklabel_format(axis='y', style='sci')
+ # vlabel = [0.5] + np.arange(1.0, 5, 1.25).tolist() + [4.75+0.25*6, 4.75+0.25*12]
+ vlabel = np.arange(-0.5, len(data[k][1]) + 1)
+ for xv in vlabel:
+ ax.axvline(xv, ymin=0, ymax=1, color='k',
+ linestyle='dashed', linewidth=1)
+ yh0 = np.nanmedian(data[k][0][0])
+ ax.axhline(yh0, xmin=0, xmax=1, color='grey',
+ linestyle='dashed', linewidth=2)
+ yh = np.nanmedian(data[k][0][1])
+ ax.axhline(yh, xmin=0, xmax=1, color='r',
+ linestyle='dashed', linewidth=2)
+ yh1 = np.nanmedian(data[k][1][0])
+ ax.axhline(yh1, xmin=0, xmax=1, color='b',
+ linestyle='dashed', linewidth=2)
+ if ylim is not None:
+ ax.set_ylim(ylim)
+ labelhandle = list()
+ for ii in range(nv):
+ labelhandle.append(bp[ii]['boxes'][0])
+ if label2 is not None:
+ if nc == 1:
+ ax.legend(labelhandle, label2, loc='lower center', frameon=False, ncol=2)
+ else:
+ axes[-1].legend(labelhandle, label2, loc='lower center', frameon=False, ncol=1, fontsize=12)
+ if title is not None:
+ # fig.suptitle(title)
+ ax.set_title(title)
+ if axes is None:
+ return fig
+ else:
+ return ax, labelhandle
+
+
+def plotTS(t,
+ y,
+ *,
+ ax=None,
+ tBar=None,
+ figsize=(12, 4),
+ cLst='rbkgcmy',
+ markerLst=None,
+ linespec=None,
+ legLst=None,
+ title=None,
+ linewidth=2,
+ ylabel=None,
+ yRange=None):
+ newFig = False
+ if ax is None:
+ fig = plt.figure(figsize=figsize)
+ ax = fig.subplots()
+ newFig = True
+
+ if type(y) is np.ndarray:
+ y = [y]
+ for k in range(len(y)):
+ tt = t[k] if type(t) is list else t
+ yy = y[k]
+ legStr = None
+ if legLst is not None:
+ legStr = legLst[k]
+ if markerLst is None:
+            if np.isnan(yy).any():
+ ax.plot(tt, yy, '*', color=cLst[k], label=legStr)
+ else:
+ ax.plot(
+ tt, yy, color=cLst[k], label=legStr, linewidth=linewidth)
+ else:
+            if markerLst[k] == '-':
+ if linespec is not None:
+ ax.plot(tt, yy, color=cLst[k], label=legStr, linestyle=linespec[k], lw=1.5)
+ else:
+ ax.plot(tt, yy, color=cLst[k], label=legStr, lw=1.5)
+ else:
+ ax.plot(
+ tt, yy, color=cLst[k], label=legStr, marker=markerLst[k])
+ if ylabel is not None:
+ ax.set_ylabel(ylabel)
+ # ax.set_xlim([np.min(tt), np.max(tt)])
+ if yRange is not None:
+ ax.set_ylim(yRange)
+ if tBar is not None:
+ ylim = ax.get_ylim()
+ tBar = [tBar] if type(tBar) is not list else tBar
+ for tt in tBar:
+ ax.plot([tt, tt], ylim, '-k')
+
+ if legLst is not None:
+ ax.legend(loc='upper right', frameon=True)
+ if title is not None:
+ ax.set_title(title, loc='center')
+ if newFig is True:
+ return fig, ax
+ else:
+ return ax
+
+
+def plotVS(x,
+ y,
+ *,
+ ax=None,
+ title=None,
+ xlabel=None,
+ ylabel=None,
+ titleCorr=True,
+ plot121=True,
+ doRank=False,
+ figsize=(8, 6)):
+ if doRank is True:
+ x = scipy.stats.rankdata(x)
+ y = scipy.stats.rankdata(y)
+ corr = scipy.stats.pearsonr(x, y)[0]
+ pLr = np.polyfit(x, y, 1)
+ xLr = np.array([np.min(x), np.max(x)])
+ yLr = np.poly1d(pLr)(xLr)
+
+ if ax is None:
+ fig = plt.figure(figsize=figsize)
+ ax = fig.subplots()
+ else:
+ fig = None
+ if title is not None:
+ if titleCorr is True:
+ title = title + ' ' + r'$\rho$={:.2f}'.format(corr)
+ ax.set_title(title)
+ else:
+ if titleCorr is True:
+ ax.set_title(r'$\rho$=' + '{:.2f}'.format(corr))
+ if xlabel is not None:
+ ax.set_xlabel(xlabel)
+ if ylabel is not None:
+ ax.set_ylabel(ylabel)
+ # corr = np.corrcoef(x, y)[0, 1]
+ ax.plot(x, y, 'b.')
+ ax.plot(xLr, yLr, 'r-')
+
+ if plot121 is True:
+ plot121Line(ax)
+
+ return fig, ax
+
+def plotxyVS(x,
+ y,
+ *,
+ ax=None,
+ title=None,
+ xlabel=None,
+ ylabel=None,
+ titleCorr=True,
+ plot121=True,
+ plotReg=False,
+ corrType='Pearson',
+ figsize=(8, 6),
+ markerType = 'ob'):
+    if corrType == 'Pearson':
+        corr = scipy.stats.pearsonr(x, y)[0]
+    elif corrType == 'Spearman':
+        corr = scipy.stats.spearmanr(x, y)[0]
+    elif corrType == 'R2':
+ ymean = np.mean(y)
+ SST = np.sum((y-ymean)**2)
+ SSRes = np.sum((y-x)**2)
+ corr = 1-SSRes/SST
+
+ rmse = np.sqrt(np.nanmean((x - y)**2))
+ pLr = np.polyfit(x, y, 1)
+ xLr = np.array([np.min(x), np.max(x)])
+ yLr = np.poly1d(pLr)(xLr)
+
+ if ax is None:
+ fig = plt.figure(figsize=figsize)
+ ax = fig.subplots()
+ else:
+ fig = None
+ if title is not None:
+ if titleCorr is True:
+ title = title + ' ' + r'$R^2$={:.2f}'.format(corr) + '\n' + r'$RMSE$={:.3f}'.format(rmse)
+ ax.set_title(title, loc='center')
+ else:
+ if titleCorr is True:
+            ax.set_title(r'$R^2$=' + '{:.2f}'.format(corr) + '\n' + r'$RMSE$={:.3f}'.format(rmse))
+ if xlabel is not None:
+ ax.set_xlabel(xlabel)
+ if ylabel is not None:
+ ax.set_ylabel(ylabel)
+ h = ax.plot(x, y, markerType, markerfacecolor='none')
+ ax.set_xlim(min(np.min(x), np.min(y))-0.1, max(np.max(x), np.max(y))+0.1)
+ ax.set_ylim(min(np.min(x), np.min(y))-0.1, max(np.max(x), np.max(y))+0.1)
+ # ax.set_xlim(np.min(x)-0.1, np.max(x)+0.1)
+ # ax.set_ylim(np.min(x)-0.1, np.max(x)+0.1)
+ if plotReg is True:
+ ax.plot(xLr, yLr, 'r-')
+ ax.set_aspect('equal', 'box')
+ if plot121 is True:
+ plot121Line(ax)
+ # xyline = np.linspace(*ax.get_xlim())
+ # ax.plot(xyline, xyline)
+
+ return fig, ax, corr
+
+def plot121Line(ax, spec='k-'):
+ xlim = ax.get_xlim()
+ ylim = ax.get_ylim()
+ vmin = np.min([xlim[0], ylim[0]])
+ vmax = np.max([xlim[1], ylim[1]])
+ ax.plot([vmin, vmax], [vmin, vmax], spec)
+
+
+def plotMap(data,
+ *,
+ ax=None,
+ lat=None,
+ lon=None,
+ title=None,
+ cRange=None,
+ shape=None,
+ pts=None,
+ figsize=(8, 4),
+ clbar=True,
+ cRangeint=False,
+ cmap=plt.cm.jet,
+ bounding=None,
+ prj='cyl'):
+
+ if cRange is not None:
+ vmin = cRange[0]
+ vmax = cRange[1]
+ else:
+ temp = flatData(data)
+ vmin = np.percentile(temp, 5)
+ vmax = np.percentile(temp, 95)
+ if cRangeint is True:
+ vmin = int(round(vmin))
+ vmax = int(round(vmax))
+    newFig = False
+    if ax is None:
+        fig = plt.figure(figsize=figsize)
+        ax = fig.subplots()
+        newFig = True
+ if len(data.squeeze().shape) == 1:
+ isGrid = False
+ else:
+ isGrid = True
+ if bounding is None:
+ # bounding = [np.min(lon)-0.5, np.max(lon)+0.5,
+ # np.min(lat)-0.5,np.max(lat)+0.5]
+ bounding = [np.min(lat)-0.5, np.max(lat)+0.5,
+ np.min(lon)-0.5,np.max(lon)+0.5]
+
+ # ax.set_extent(bounding, crs=ccrs.Geodetic())
+ # ax.add_feature(cfeature.OCEAN) # ocean backgroud
+ # ax.add_feature(cfeature.COASTLINE)
+ # ax.add_feature(cfeature.STATES, linestyle = ':')
+ # # ax.add_feature(cfeature.STATES.with_scale('110m'))
+
+ mm = basemap.Basemap(
+ llcrnrlat=bounding[0],
+ urcrnrlat=bounding[1],
+ llcrnrlon=bounding[2],
+ urcrnrlon=bounding[3],
+ projection=prj,
+ resolution='c',
+ ax=ax)
+ mm.drawcoastlines()
+ mm.drawstates(linestyle='dashed')
+ mm.drawcountries(linewidth=1.0, linestyle='-.')
+ x, y = mm(lon, lat)
+ # x, y = lon, lat
+ if isGrid is True:
+ xx, yy = np.meshgrid(x, y)
+ cs = mm.pcolormesh(xx, yy, data, cmap=cmap, vmin=vmin, vmax=vmax)
+ # cs = ax.pcolormesh(lon, lat, data, rasterized=True, )
+
+ # cs = mm.imshow(
+ # np.flipud(data),
+ # cmap=plt.cm.jet(np.arange(0, 1, 0.1)),
+ # vmin=vmin,
+ # vmax=vmax,
+ # extent=[x[0], x[-1], y[0], y[-1]])
+ else:
+
+ cs = mm.scatter(
+ x, y, c=data, s=30, cmap=cmap, vmin=vmin, vmax=vmax)
+
+ if shape is not None:
+ crd = np.array(shape.points)
+ par = shape.parts
+ if len(par) > 1:
+ for k in range(0, len(par) - 1):
+ x = crd[par[k]:par[k + 1], 0]
+ y = crd[par[k]:par[k + 1], 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ else:
+ y = crd[:, 0]
+ x = crd[:, 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ if pts is not None:
+ mm.plot(pts[1], pts[0], 'k*', markersize=4)
+ npt = len(pts[0])
+ for k in range(npt):
+ plt.text(
+ pts[1][k],
+ pts[0][k],
+ string.ascii_uppercase[k],
+ fontsize=18)
+ if clbar is True:
+ mm.colorbar(cs, pad='5%', location='bottom')
+ if title is not None:
+ ax.set_title(title)
+    if newFig is True:
+        return fig, ax, mm
+    else:
+        return mm, cs
+
+
+def plotlocmap(
+ lat,
+ lon,
+ ax=None,
+ baclat=None,
+ baclon=None,
+ title=None,
+ shape=None,
+ txtlabel=None):
+    newFig = False
+    if ax is None:
+        fig = plt.figure(figsize=(8, 4))
+        ax = fig.subplots()
+        newFig = True
+ mm = basemap.Basemap(
+ llcrnrlat=min(np.min(baclat),np.min(lat))-2.0,
+ urcrnrlat=max(np.max(baclat),np.max(lat))+2.0,
+ llcrnrlon=min(np.min(baclon),np.min(lon))-1.0,
+ urcrnrlon=max(np.max(baclon),np.max(lon))+1.0,
+ projection='cyl',
+ resolution='c',
+ ax=ax)
+ mm.drawcoastlines()
+ mm.drawstates(linestyle='dashed')
+ mm.drawcountries(linewidth=1.0, linestyle='-.')
+ x, y = mm(lon, lat)
+ ax.plot(x, y, 'k*', markersize=12)
+ if shape is not None:
+ crd = np.array(shape.points)
+ par = shape.parts
+ if len(par) > 1:
+ for k in range(0, len(par) - 1):
+ x = crd[par[k]:par[k + 1], 0]
+ y = crd[par[k]:par[k + 1], 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ else:
+ y = crd[:, 0]
+ x = crd[:, 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ if title is not None:
+ ax.set_title(title, loc='center')
+ if txtlabel is not None:
+ for ii in range(len(lat)):
+ txt = txtlabel[ii]
+            xy = (x[ii]-3.0, y[ii]-1.5)
+ ax.annotate(txt, xy, fontsize=18, fontweight='bold')
+    if fig is not None:
+        return fig, ax, mm
+    else:
+        return mm
+
+
+def plotPUBloc(data,
+ *,
+ ax=None,
+ lat=None,
+ lon=None,
+ baclat=None,
+ baclon=None,
+ title=None,
+ cRange=None,
+ cRangeint=False,
+ shape=None):
+ if cRange is not None:
+ vmin = cRange[0]
+ vmax = cRange[1]
+ else:
+ temp = flatData(data)
+ vmin = np.percentile(temp, 5)
+ vmax = np.percentile(temp, 95)
+ if cRangeint is True:
+ vmin = int(round(vmin))
+ vmax = int(round(vmax))
+    if ax is None:
+        fig = plt.figure(figsize=(8, 4))
+        ax = fig.subplots()
+    else:
+        fig = None
+    isGrid = False
+
+ mm = basemap.Basemap(
+ llcrnrlat=min(np.min(baclat),np.min(lat))-0.5,
+ urcrnrlat=max(np.max(baclat),np.max(lat))+0.5,
+ llcrnrlon=min(np.min(baclon),np.min(lon))-0.5,
+ urcrnrlon=max(np.max(baclon),np.max(lon))+0.5,
+ projection='cyl',
+ resolution='c',
+ ax=ax)
+ mm.drawcoastlines()
+ mm.drawstates(linestyle='dashed')
+ mm.drawcountries(linewidth=0.5, linestyle='-.')
+ x, y = mm(baclon, baclat)
+ bs = mm.scatter(
+ x, y, c='k', s=50)
+ x, y = mm(lon, lat)
+ if isGrid is True:
+ xx, yy = np.meshgrid(x, y)
+ cs = mm.pcolormesh(xx, yy, data, cmap=plt.cm.jet, vmin=vmin, vmax=vmax)
+ else:
+ cs = mm.scatter(
+ x, y, c=data, s=150, cmap=plt.cm.jet, vmin=vmin, vmax=vmax, marker='*')
+
+ if shape is not None:
+ crd = np.array(shape.points)
+ par = shape.parts
+ if len(par) > 1:
+ for k in range(0, len(par) - 1):
+ x = crd[par[k]:par[k + 1], 0]
+ y = crd[par[k]:par[k + 1], 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ else:
+ y = crd[:, 0]
+ x = crd[:, 1]
+ mm.plot(x, y, color='r', linewidth=3)
+ if title is not None:
+ ax.set_title(title)
+    if fig is not None:
+        return fig, ax, mm
+    else:
+        return mm
+
+
+def plotTsMap(dataMap,
+ dataTs,
+ *,
+ lat,
+ lon,
+ t,
+ dataTs2=None,
+ tBar=None,
+ mapColor=None,
+ tsColor='krbg',
+ tsColor2='cmy',
+ mapNameLst=None,
+ tsNameLst=None,
+ tsNameLst2=None,
+ figsize=[9, 6],
+ isGrid=False,
+ multiTS=False,
+ linewidth=1):
+ if type(dataMap) is np.ndarray:
+ dataMap = [dataMap]
+ if type(dataTs) is np.ndarray:
+ dataTs = [dataTs]
+ if dataTs2 is not None:
+ if type(dataTs2) is np.ndarray:
+ dataTs2 = [dataTs2]
+ nMap = len(dataMap)
+
+ # setup axes
+ fig = plt.figure(figsize=figsize)
+ if multiTS is False:
+ nAx = 1
+ dataTs = [dataTs]
+ if dataTs2 is not None:
+ dataTs2 = [dataTs2]
+ else:
+ nAx = len(dataTs)
+ gs = gridspec.GridSpec(3 + nAx, nMap)
+ gs.update(wspace=0.025, hspace=0)
+ axTsLst = list()
+ for k in range(nAx):
+ axTs = fig.add_subplot(gs[k + 3, :])
+ axTsLst.append(axTs)
+ if dataTs2 is not None:
+ axTs2Lst = list()
+ for axTs in axTsLst:
+ axTs2 = axTs.twinx()
+ axTs2Lst.append(axTs2)
+
+ # plot maps
+ for k in range(nMap):
+        crsProj = ccrs.PlateCarree()  # set geographic coordinates
+        ax = fig.add_subplot(gs[0:2, k], projection=crsProj, frameon=True)
+ cRange = None if mapColor is None else mapColor[k]
+ title = None if mapNameLst is None else mapNameLst[k]
+ data = dataMap[k]
+        if isGrid is False:
+            plotMap(data, lat=lat, lon=lon, ax=ax, cRange=cRange, title=title)
+        else:
+            grid, uy, ux = utils.grid.array2grid(data, lat=lat, lon=lon)
+            plotMap(grid, lat=uy, lon=ux, ax=ax, cRange=cRange, title=title)
+
+ # plot ts
+ def onclick(event):
+ xClick = event.xdata
+ yClick = event.ydata
+ d = np.sqrt((xClick - lon)**2 + (yClick - lat)**2)
+ ind = np.argmin(d)
+ titleStr = 'pixel %d, lat %.3f, lon %.3f' % (ind, lat[ind], lon[ind])
+ for ix in range(nAx):
+ tsLst = list()
+ for temp in dataTs[ix]:
+ tsLst.append(temp[ind, :])
+ axTsLst[ix].clear()
+ if ix == 0:
+ plotTS(
+ t,
+ tsLst,
+ ax=axTsLst[ix],
+ legLst=tsNameLst,
+ title=titleStr,
+ cLst=tsColor,
+ linewidth=linewidth,
+ tBar=tBar)
+ else:
+ plotTS(
+ t,
+ tsLst,
+ ax=axTsLst[ix],
+ legLst=tsNameLst,
+ cLst=tsColor,
+ linewidth=linewidth,
+ tBar=tBar)
+
+ if dataTs2 is not None:
+ tsLst2 = list()
+ for temp in dataTs2[ix]:
+ tsLst2.append(temp[ind, :])
+ axTs2Lst[ix].clear()
+ plotTS(
+ t,
+ tsLst2,
+ ax=axTs2Lst[ix],
+ legLst=tsNameLst2,
+ cLst=tsColor2,
+                    linewidth=linewidth,
+ tBar=tBar)
+ if ix != nAx - 1:
+ axTsLst[ix].set_xticklabels([])
+ plt.draw()
+
+ fig.canvas.mpl_connect('button_press_event', onclick)
+ plt.tight_layout()
+ plt.show()
+
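+# Hedged usage sketch (hypothetical helper, not part of the original API).
+# plotTsMap links a clickable station map to time-series panels: a click on
+# the map finds the nearest station and onclick redraws its series.
+def _demo_plot_ts_map():
+    ngrid, nt = 10, 365
+    lat = np.random.uniform(30, 45, ngrid)
+    lon = np.random.uniform(-110, -90, ngrid)
+    err = np.random.rand(ngrid)                    # one value per station
+    obs = np.random.rand(ngrid, nt)                # [station, time]
+    sim = obs + 0.1 * np.random.randn(ngrid, nt)
+    plotTsMap([err], [obs, sim], lat=lat, lon=lon, t=np.arange(nt),
+              mapNameLst=['error'], tsNameLst=['obs', 'sim'])
+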
+def plotTsMapGage(dataMap,
+ dataTs,
+ *,
+ lat,
+ lon,
+ t,
+ colorMap=None,
+ mapNameLst=None,
+ tsNameLst=None,
+ figsize=[12, 6]):
+ if type(dataMap) is np.ndarray:
+ dataMap = [dataMap]
+ if type(dataTs) is np.ndarray:
+ dataTs = [dataTs]
+ nMap = len(dataMap)
+ nTs = len(dataTs)
+
+ fig = plt.figure(figsize=figsize, constrained_layout=True)
+ gs = gridspec.GridSpec(3, nMap)
+
+ for k in range(nMap):
+ ax = fig.add_subplot(gs[0:2, k])
+ cRange = None if colorMap is None else colorMap[k]
+ title = None if mapNameLst is None else mapNameLst[k]
+ data = dataMap[k]
+ if len(data.squeeze().shape) == 1:
+ plotMap(data, lat=lat, lon=lon, ax=ax, cRange=cRange, title=title)
+ else:
+ grid, uy, ux = utils.grid.array2grid(data, lat=lat, lon=lon)
+ plotMap(grid, lat=uy, lon=ux, ax=ax, cRange=cRange, title=title)
+ axTs = fig.add_subplot(gs[2, :])
+
+ def onclick(event):
+ xClick = event.xdata
+ yClick = event.ydata
+ d = np.sqrt((xClick - lon)**2 + (yClick - lat)**2)
+ ind = np.argmin(d)
+ titleStr = 'gage %d, lat %.3f, lon %.3f' % (ind, lat[ind], lon[ind])
+ ax.clear()
+ plotMap(data, lat=lat, lon=lon, ax=ax, cRange=cRange, title=title)
+ ax.plot(lon[ind], lat[ind], 'k*', markersize=12)
+ tsLst = list()
+ for k in range(nTs):
+ tsLst.append(dataTs[k][ind, :])
+ axTs.clear()
+ plotTS(t, tsLst, ax=axTs, legLst=tsNameLst, title=titleStr)
+ plt.draw()
+
+ fig.canvas.mpl_connect('button_press_event', onclick)
+    # the figure uses constrained_layout, so tight_layout() is not needed
+ plt.show()
+
+
+def plotCDF(xLst,
+ *,
+ ax=None,
+ title=None,
+ legendLst=None,
+ figsize=(8, 6),
+ ref='121',
+ cLst=None,
+ xlabel=None,
+ ylabel=None,
+ showDiff='RMSE',
+ xlim=None,
+ linespec=None,
+ markLst=None):
+ if ax is None:
+ fig = plt.figure(figsize=figsize)
+ ax = fig.subplots()
+ else:
+ fig = None
+
+ if cLst is None:
+ cmap = plt.cm.jet
+ cLst = cmap(np.linspace(0, 1, len(xLst)))
+
+ if title is not None:
+ ax.set_title(title, loc='left')
+ if xlabel is not None:
+ ax.set_xlabel(xlabel)
+ if ylabel is not None:
+ ax.set_ylabel(ylabel)
+
+ xSortLst = list()
+ yRankLst = list()
+ rmseLst = list()
+ ksdLst = list()
+ for k in range(0, len(xLst)):
+ x = xLst[k]
+ xSort = flatData(x)
+ yRank = np.arange(len(xSort)) / float(len(xSort) - 1)
+ xSortLst.append(xSort)
+ yRankLst.append(yRank)
+ if legendLst is None:
+ legStr = None
+ else:
+ legStr = legendLst[k]
+        if ref is not None:
+            if ref == '121':
+                yRef = yRank
+            elif ref == 'norm':
+                yRef = scipy.stats.norm.cdf(xSort, 0, 1)
+            rmse = np.sqrt(((xSort - yRef)**2).mean())
+            ksd = np.max(np.abs(xSort - yRef))
+            rmseLst.append(rmse)
+            ksdLst.append(ksd)
+            if legStr is not None:
+                if showDiff == 'RMSE':
+                    legStr = legStr + ' RMSE=' + '%.3f' % rmse
+                elif showDiff == 'KS':
+                    legStr = legStr + ' KS=' + '%.3f' % ksd
+        ls = '-' if linespec is None else linespec[k]
+        if markLst is None:
+            ax.plot(xSort, yRank, color=cLst[k], label=legStr, linestyle=ls)
+        else:
+            ax.plot(xSort, yRank, color=cLst[k], label=legStr, linestyle=ls,
+                    marker=markLst[k], markevery=60)
+    ax.grid(True)  # the b= keyword no longer exists in newer Matplotlib
+ ax.axhline(0.5, xmin=0, xmax=1, color='k',
+ linestyle='-', linewidth=1.0)
+ if xlim is not None:
+ ax.set(xlim=xlim)
+    if ref == '121':
+        ax.plot([0, 1], [0, 1], 'k', label='y=x')
+    if ref == 'norm':
+ xNorm = np.linspace(-5, 5, 1000)
+ normCdf = scipy.stats.norm.cdf(xNorm, 0, 1)
+ ax.plot(xNorm, normCdf, 'k', label='Gaussian')
+ if legendLst is not None:
+ ax.legend(loc='best', frameon=False)
+ return fig, ax
+
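+# Hedged usage sketch (hypothetical helper, not part of the original API).
+# Compares the empirical CDFs of two samples against a standard normal.
+def _demo_plot_cdf():
+    a = np.random.randn(1000)
+    b = 1.5 * np.random.randn(1000)
+    fig, ax = plotCDF([a, b], legendLst=['a', 'b'], ref='norm',
+                      linespec=['-', '--'])
+    plt.show()
+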
+def plotFDC(xLst,
+ *,
+ ax=None,
+ title=None,
+ legendLst=None,
+ figsize=(8, 6),
+ ref='121',
+ cLst=None,
+ xlabel=None,
+ ylabel=None,
+ showDiff='RMSE',
+ xlim=None,
+ linespec=None):
+ if ax is None:
+ fig = plt.figure(figsize=figsize)
+ ax = fig.subplots()
+ else:
+ fig = None
+
+ if cLst is None:
+ cmap = plt.cm.jet
+ cLst = cmap(np.linspace(0, 1, len(xLst)))
+
+ if title is not None:
+ ax.set_title(title, loc='center')
+ if xlabel is not None:
+ ax.set_xlabel(xlabel)
+ if ylabel is not None:
+ ax.set_ylabel(ylabel)
+
+ xSortLst = list()
+ rmseLst = list()
+ ksdLst = list()
+ for k in range(0, len(xLst)):
+ x = xLst[k]
+ xSort = flatData(x, sortOpt=1)
+ yRank = np.arange(1, len(xSort)+1) / float(len(xSort) + 1)*100
+ xSortLst.append(xSort)
+ if legendLst is None:
+ legStr = None
+ else:
+ legStr = legendLst[k]
+        if ref is not None:
+            if ref == '121':
+                yRef = yRank
+            elif ref == 'norm':
+                yRef = scipy.stats.norm.cdf(xSort, 0, 1)
+            rmse = np.sqrt(((xSort - yRef)**2).mean())
+            ksd = np.max(np.abs(xSort - yRef))
+            rmseLst.append(rmse)
+            ksdLst.append(ksd)
+            if legStr is not None:
+                if showDiff == 'RMSE':
+                    legStr = legStr + ' RMSE=' + '%.3f' % rmse
+                elif showDiff == 'KS':
+                    legStr = legStr + ' KS=' + '%.3f' % ksd
+        ls = '-' if linespec is None else linespec[k]
+        ax.plot(yRank, xSort, color=cLst[k], label=legStr, linestyle=ls)
+    ax.grid(True)
+ if xlim is not None:
+ ax.set(xlim=xlim)
+    if ref == '121':
+        ax.plot([0, 1], [0, 1], 'k', label='y=x')
+    if ref == 'norm':
+ xNorm = np.linspace(-5, 5, 1000)
+ normCdf = scipy.stats.norm.cdf(xNorm, 0, 1)
+ ax.plot(xNorm, normCdf, 'k', label='Gaussian')
+ if legendLst is not None:
+ ax.legend(loc='best', frameon=False)
+ return fig, ax
+
+
+def flatData(x, sortOpt=0):
+    # sortOpt: 0: small to large, 1: large to small, -1: no sort
+    xArrayTemp = x.flatten()
+    xArray = xArrayTemp[~np.isnan(xArrayTemp)]
+    if sortOpt == 0:
+        xSort = np.sort(xArray)
+    elif sortOpt == 1:
+        xSort = np.sort(xArray)[::-1]
+    elif sortOpt == -1:
+        xSort = xArray
+    else:
+        raise ValueError('sortOpt must be 0, 1, or -1')
+    return xSort
+
+
+def scaleSigma(s, u, y):
+ yNorm = (y - u) / s
+ _, sF = scipy.stats.norm.fit(flatData(yNorm))
+ return sF
+
+
+def reCalSigma(s, u, y):
+ conf = scipy.special.erf(np.abs(y - u) / s / np.sqrt(2))
+ yNorm = (y - u) / s
+ return conf, yNorm
+
+
+def regLinear(y, x):
+ ones = np.ones(len(x[0]))
+ X = sm.add_constant(np.column_stack((x[0], ones)))
+ for ele in x[1:]:
+ X = sm.add_constant(np.column_stack((ele, X)))
+ out = sm.OLS(y, X).fit()
+ return out
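+
+
+# Hedged usage sketch (hypothetical helper, not part of the original API).
+# regLinear regresses y on stacked predictors with a statsmodels OLS fit.
+def _demo_reg_linear():
+    x1 = np.random.rand(100)
+    x2 = np.random.rand(100)
+    y = 2.0 * x1 - 1.0 * x2 + 0.1 * np.random.randn(100)
+    out = regLinear(y, [x1, x2])
+    print(out.summary())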
diff --git a/post/stat.py b/post/stat.py
new file mode 100644
index 0000000..5bf5c3a
--- /dev/null
+++ b/post/stat.py
@@ -0,0 +1,84 @@
+import numpy as np
+import scipy.stats
+
+
+keyLst = ['Bias', 'RMSE', 'ubRMSE', 'Corr']
+
+
+def statError(pred, target):
+ ngrid, nt = pred.shape
+ # Bias
+ Bias = np.nanmean(pred - target, axis=1)
+ # RMSE
+ RMSE = np.sqrt(np.nanmean((pred - target)**2, axis=1))
+ # ubRMSE
+ predMean = np.tile(np.nanmean(pred, axis=1), (nt, 1)).transpose()
+ targetMean = np.tile(np.nanmean(target, axis=1), (nt, 1)).transpose()
+ predAnom = pred - predMean
+ targetAnom = target - targetMean
+ ubRMSE = np.sqrt(np.nanmean((predAnom - targetAnom)**2, axis=1))
+
+
+ # rho R2 NSE
+ Corr = np.full(ngrid, np.nan)
+ CorrSp = np.full(ngrid, np.nan)
+ R2 = np.full(ngrid, np.nan)
+ NSE = np.full(ngrid, np.nan)
+ PBiaslow = np.full(ngrid, np.nan)
+ PBiashigh = np.full(ngrid, np.nan)
+ PBias = np.full(ngrid, np.nan)
+ PBiasother = np.full(ngrid, np.nan)
+ KGE = np.full(ngrid, np.nan)
+ KGE12 = np.full(ngrid, np.nan)
+ RMSElow = np.full(ngrid, np.nan)
+ RMSEhigh = np.full(ngrid, np.nan)
+ RMSEother = np.full(ngrid, np.nan)
+ for k in range(0, ngrid):
+ x = pred[k, :]
+ y = target[k, :]
+ ind = np.where(np.logical_and(~np.isnan(x), ~np.isnan(y)))[0]
+ if ind.shape[0] > 0:
+ xx = x[ind]
+ yy = y[ind]
+ # percent bias
+ PBias[k] = np.sum(xx - yy) / np.sum(yy) * 100
+
+ # FHV the peak flows bias 2%
+ # FLV the low flows bias bottom 30%, log space
+ pred_sort = np.sort(xx)
+ target_sort = np.sort(yy)
+ indexlow = round(0.3 * len(pred_sort))
+ indexhigh = round(0.98 * len(pred_sort))
+ lowpred = pred_sort[:indexlow]
+ highpred = pred_sort[indexhigh:]
+ otherpred = pred_sort[indexlow:indexhigh]
+ lowtarget = target_sort[:indexlow]
+ hightarget = target_sort[indexhigh:]
+ othertarget = target_sort[indexlow:indexhigh]
+            PBiaslow[k] = np.sum(abs(lowpred - lowtarget)) / np.sum(lowtarget + 0.0001) * 100
+            PBiashigh[k] = np.sum(abs(highpred - hightarget)) / np.sum(hightarget) * 100
+            PBiasother[k] = np.sum(abs(otherpred - othertarget)) / np.sum(othertarget) * 100
+ RMSElow[k] = np.sqrt(np.nanmean((lowpred - lowtarget)**2))
+ RMSEhigh[k] = np.sqrt(np.nanmean((highpred - hightarget)**2))
+ RMSEother[k] = np.sqrt(np.nanmean((otherpred - othertarget)**2))
+
+ if ind.shape[0] > 1:
+ # Theoretically at least two points for correlation
+ Corr[k] = scipy.stats.pearsonr(xx, yy)[0]
+ CorrSp[k] = scipy.stats.spearmanr(xx, yy)[0]
+ yymean = yy.mean()
+ yystd = np.std(yy)
+ xxmean = xx.mean()
+ xxstd = np.std(xx)
+ KGE[k] = 1 - np.sqrt((Corr[k]-1)**2 + (xxstd/yystd-1)**2 + (xxmean/yymean-1)**2)
+ KGE12[k] = 1 - np.sqrt((Corr[k] - 1) ** 2 + ((xxstd*yymean)/ (yystd*xxmean) - 1) ** 2 + (xxmean / yymean - 1) ** 2)
+            SST = np.sum((yy-yymean)**2)
+            SSRes = np.sum((yy-xx)**2)
+            # with observed variance as the baseline, this R2 is identical to NSE
+            R2[k] = 1 - SSRes/SST
+            NSE[k] = 1 - SSRes/SST
+
+ outDict = dict(Bias=Bias, RMSE=RMSE, ubRMSE=ubRMSE, Corr=Corr, CorrSp=CorrSp, R2=R2, NSE=NSE,
+ FLV=PBiaslow, FHV=PBiashigh, PBias=PBias, PBiasother=PBiasother, KGE=KGE, KGE12=KGE12,
+ lowRMSE=RMSElow, highRMSE=RMSEhigh, midRMSE=RMSEother)
+ return outDict
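+
+
+# Hedged usage sketch (hypothetical helper, not part of the original module).
+# statError expects [ngrid, ntime] arrays and returns a dict of per-site
+# metrics; NaNs are skipped pairwise inside the per-site loop.
+def _demo_stat_error():
+    ngrid, nt = 5, 1000
+    target = np.random.rand(ngrid, nt)
+    pred = target + 0.05 * np.random.randn(ngrid, nt)
+    out = statError(pred, target)
+    print({key: float(np.nanmedian(val)) for key, val in out.items()})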
diff --git a/torch_new.yml b/torch_new.yml
new file mode 100644
index 0000000..d647204
--- /dev/null
+++ b/torch_new.yml
@@ -0,0 +1,134 @@
+name: torch_new
+channels:
+ - anaconda
+ - conda-forge
+ - defaults
+dependencies:
+ - _libgcc_mutex=0.1=main
+ - _openmp_mutex=5.1=1_gnu
+ - blas=1.0=openblas
+ - bottleneck=1.3.5=py38h7deecbd_0
+ - brotli=1.0.9=h166bdaf_7
+ - brotli-bin=1.0.9=h166bdaf_7
+ - ca-certificates=2023.01.10=h06a4308_0
+ - certifi=2022.12.7=py38h06a4308_0
+ - contourpy=1.0.5=py38hdb19cb5_0
+ - cycler=0.11.0=pyhd8ed1ab_0
+ - dbus=1.13.18=hb2f20db_0
+ - expat=2.2.10=h9c3ff4c_0
+ - fftw=3.3.9=h27cfd23_1
+ - fontconfig=2.14.1=hef1e5e3_0
+ - fonttools=4.25.0=pyhd3eb1b0_0
+ - freetype=2.10.4=h0708190_1
+ - giflib=5.2.1=h36c2ea0_2
+ - glib=2.69.1=he621ea3_2
+ - gst-plugins-base=1.14.1=h6a678d5_1
+ - gstreamer=1.14.1=h5eee18b_1
+ - icu=58.2=hf484d3e_1000
+ - importlib_resources=5.12.0=pyhd8ed1ab_0
+ - jpeg=9e=h166bdaf_1
+ - keyutils=1.6.1=h166bdaf_0
+ - kiwisolver=1.4.4=py38h6a678d5_0
+ - krb5=1.19.3=h3790be6_0
+ - lcms2=2.12=h3be6417_0
+ - ld_impl_linux-64=2.38=h1181459_1
+ - lerc=3.0=h295c915_0
+ - libblas=3.9.0=15_linux64_openblas
+ - libbrotlicommon=1.0.9=h166bdaf_7
+ - libbrotlidec=1.0.9=h166bdaf_7
+ - libbrotlienc=1.0.9=h166bdaf_7
+ - libcblas=3.9.0=15_linux64_openblas
+ - libclang=10.0.1=default_hb85057a_2
+ - libdeflate=1.17=h5eee18b_0
+ - libedit=3.1.20191231=he28a2e2_2
+ - libevent=2.1.12=h8f2d780_0
+ - libffi=3.4.2=h6a678d5_6
+ - libgcc-ng=11.2.0=h1234567_1
+ - libgfortran-ng=12.2.0=h69a702a_19
+ - libgfortran5=12.2.0=h337968e_19
+ - libgomp=11.2.0=h1234567_1
+ - liblapack=3.9.0=15_linux64_openblas
+ - libllvm10=10.0.1=he513fc3_3
+ - libopenblas=0.3.20=pthreads_h78a6416_0
+ - libpng=1.6.39=h5eee18b_0
+ - libpq=12.9=h16c4e8d_3
+ - libstdcxx-ng=11.2.0=h1234567_1
+ - libtiff=4.5.0=h6a678d5_2
+ - libwebp=1.2.4=h11a3e52_1
+ - libwebp-base=1.2.4=h5eee18b_1
+ - libxcb=1.15=h7f8727e_0
+ - libxkbcommon=1.0.1=hfa300c1_0
+ - libxml2=2.9.14=h74e7548_0
+ - libxslt=1.1.35=h4e12654_0
+ - lz4-c=1.9.3=h9c3ff4c_1
+ - matplotlib=3.7.1=py38h578d9bd_0
+ - matplotlib-base=3.7.1=py38h417a72b_0
+ - munkres=1.1.4=pyh9f0ad1d_0
+ - ncurses=6.4=h6a678d5_0
+ - nspr=4.33=h295c915_0
+ - nss=3.74=h0370c37_0
+ - numexpr=2.8.4=py38hd2a5715_0
+ - openssl=1.1.1s=h7f8727e_0
+ - packaging=23.0=pyhd8ed1ab_0
+ - patsy=0.5.3=py38h06a4308_0
+ - pcre=8.45=h9c3ff4c_0
+ - pip=22.3.1=py38h06a4308_0
+ - ply=3.11=py_1
+ - pyparsing=3.0.9=pyhd8ed1ab_0
+ - pyqt=5.15.7=py38h6a678d5_1
+ - pyqt5-sip=12.11.0=py38h6a678d5_1
+ - python=3.8.16=h7a1cb2a_2
+ - python-dateutil=2.8.2=pyhd8ed1ab_0
+ - python_abi=3.8=2_cp38
+ - qt-main=5.15.2=h327a75a_7
+ - qt-webengine=5.15.9=hd2b0992_4
+ - qtwebkit=5.212=h4eab89a_4
+ - readline=8.2=h5eee18b_0
+ - scipy=1.7.3=py38hf838250_2
+ - setuptools=65.6.3=py38h06a4308_0
+ - sip=6.6.2=py38h6a678d5_0
+ - six=1.16.0=pyh6c4a22f_0
+ - sqlite=3.40.1=h5082296_0
+ - statsmodels=0.13.5=py38h7deecbd_0
+ - tk=8.6.12=h1ccaba5_0
+ - toml=0.10.2=pyhd8ed1ab_0
+ - tornado=6.1=py38h0a891b7_3
+ - wheel=0.37.1=pyhd3eb1b0_0
+ - xz=5.2.10=h5eee18b_1
+ - zipp=3.15.0=pyhd8ed1ab_0
+ - zlib=1.2.13=h5eee18b_0
+ - zstd=1.5.2=ha4553b6_0
+ - pip:
+ - charset-normalizer==3.1.0
+ - cmake==3.26.0
+ - filelock==3.10.0
+ - idna==3.4
+ - jinja2==3.1.2
+ - lit==15.0.7
+ - markupsafe==2.1.2
+ - mpmath==1.3.0
+ - networkx==3.0
+ - numpy==1.24.2
+ - nvidia-cublas-cu11==11.10.3.66
+ - nvidia-cuda-cupti-cu11==11.7.101
+ - nvidia-cuda-nvrtc-cu11==11.7.99
+ - nvidia-cuda-runtime-cu11==11.7.99
+ - nvidia-cudnn-cu11==8.5.0.96
+ - nvidia-cufft-cu11==10.9.0.58
+ - nvidia-curand-cu11==10.2.10.91
+ - nvidia-cusolver-cu11==11.4.0.1
+ - nvidia-cusparse-cu11==11.7.4.91
+ - nvidia-nccl-cu11==2.14.3
+ - nvidia-nvtx-cu11==11.7.91
+ - pandas==1.5.3
+ - pillow==9.4.0
+ - pytz==2022.7.1
+ - requests==2.28.2
+ - sympy==1.11.1
+ - torch==2.0.0
+ - torchaudio==2.0.1
+ - torchvision==0.15.1
+ - triton==2.0.0
+ - typing-extensions==4.5.0
+ - urllib3==1.26.15
+prefix: /storage/home/yxs275/.conda/envs/torch_new
diff --git a/utils/rout.py b/utils/rout.py
new file mode 100644
index 0000000..db2293d
--- /dev/null
+++ b/utils/rout.py
@@ -0,0 +1,45 @@
+import torch
+
+import torch.nn.functional as F
+def UH_gamma(a, b, lenF=10):
+    # unit hydrograph from a gamma distribution
+    # a, b: [time, batch, var]; only the first lenF time steps are used
+    # (the first dimension of a repeats the same values over time)
+    m = a.shape
+    aa = F.relu(a[0:lenF, :, :]).view([lenF, m[1], m[2]]) + 0.1  # shape parameter, minimum 0.1
+    theta = F.relu(b[0:lenF, :, :]).view([lenF, m[1], m[2]]) + 0.5  # scale parameter, minimum 0.5
+    t = torch.arange(0.5, lenF * 1.0).view([lenF, 1, 1]).repeat([1, m[1], m[2]])
+    t = t.to(aa)  # match device and dtype of the parameters (works on CPU and GPU)
+    # gamma density: t**(a-1) * exp(-t/theta) / (Gamma(a) * theta**a)
+    denom = (aa.lgamma().exp()) * (theta ** aa)
+    mid = t ** (aa - 1)
+    right = torch.exp(-t / theta)
+    w = 1 / denom * mid * right
+    w = w / w.sum(0)  # scale to 1 for each UH
+
+    return w.to(a)
+
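+# Hedged usage sketch (hypothetical helper, not part of the original module).
+# Builds unit hydrographs for a batch of 4 basins; each column sums to 1.
+def _demo_uh_gamma():
+    a = torch.rand(10, 4, 1)  # [time, batch, var]
+    b = torch.rand(10, 4, 1)
+    w = UH_gamma(a, b, lenF=10)
+    print(w.shape, w.sum(0))  # torch.Size([10, 4, 1]); sums of ones
+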
+def UH_conv(x, UH, viewmode=1):
+    # UH is a vector indicating the unit hydrograph
+    # the convolved dimension will be the last dimension
+    # UH convolution is
+    # Q(t) = integral( x(tau) * UH(t - tau) ) dtau
+    # conv1d computes integral( w(tau) * x(t + tau) ) dtau
+    # hence we flip the UH
+    # https://programmer.group/pytorch-learning-conv1d-conv2d-and-conv3d.html
+    # view
+    # x: [batch, var, time]
+    # UH: [batch, var, uhLen]
+    # batch needs to be accommodated by channels and we make use of groups
+    # https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+    # https://pytorch.org/docs/stable/nn.functional.html
+
+    mm = x.shape
+    nb = mm[0]
+    m = UH.shape[-1]
+    padd = m - 1
+    if viewmode == 1:
+        xx = x.view([1, nb, mm[-1]])
+        w = UH.view([nb, 1, m])
+        groups = nb
+
+    y = F.conv1d(xx, torch.flip(w, [2]), groups=groups, padding=padd, stride=1, bias=None)
+    y = y[:, :, 0:-padd]  # drop the trailing padding so the output length matches the input
+    return y.view(mm)
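+
+
+# Hedged usage sketch (hypothetical helper, not part of the original module).
+# Routes batched runoff through per-basin unit hydrographs from UH_gamma.
+def _demo_uh_conv():
+    nb, nt, lenF = 4, 100, 10
+    x = torch.rand(nb, 1, nt)                                 # [batch, var, time]
+    a = torch.rand(lenF, nb, 1)
+    b = torch.rand(lenF, nb, 1)
+    uh = UH_gamma(a, b, lenF).permute(1, 2, 0).contiguous()   # -> [batch, var, lenF]
+    q = UH_conv(x, uh)
+    print(q.shape)                                            # same shape as x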