From 1cc028e525588119307ac2a43385d2789181569c Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 16:26:14 +0000
Subject: [PATCH 1/9] move common methods to Abstract

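The parameter-registration loop that each model previously implemented
in its own setModelParameters() now lives once in AbstractNMM, which
also takes the params object in its constructor. A minimal sketch of
the intended subclass usage (ExampleNMM is a hypothetical stand-in,
and `params` is any object exposing `par` attributes):

    from whobpyt.datatypes.AbstractNMM import AbstractNMM

    class ExampleNMM(AbstractNMM):
        def __init__(self, params):
            super(ExampleNMM, self).__init__(params)
            self.track_params = []     # appended to by the base method
            self.setModelParameters()  # fills self.params_fitted

    model = ExampleNMM(params)
    model.params_fitted['modelparameter']  # tensors for the optimizer
    model.params_fitted['hyperparameter']  # prior means/variances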
---
 whobpyt/datatypes/AbstractNMM.py | 35 +++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/whobpyt/datatypes/AbstractNMM.py b/whobpyt/datatypes/AbstractNMM.py
index 9793ad5a..c6beafac 100644
--- a/whobpyt/datatypes/AbstractNMM.py
+++ b/whobpyt/datatypes/AbstractNMM.py
@@ -1,11 +1,13 @@
 import torch
-
+from whobpyt.datatypes.parameter import par
+from torch.nn.parameter import Parameter
 class AbstractNMM(torch.nn.Module):
     # This is the abstract class for the models (typically neural mass models) that are being trained. 
     # The neuroimaging modality might be integrated into the model as well. 
 
-    def __init__(self):
+    def __init__(self, params):
         super(AbstractNMM, self).__init__() # May not want to enherit from torch.nn.Module in the future 
+        self.params = params
         
         self.state_names = ["None"] # The names of the state variables of the model
         self.output_names = ["None"] # The variable to be used as output from the NMM, for purposes such as the input to an objective function
@@ -25,7 +27,34 @@ def setModelParameters(self):
         # Setting the parameters that will be optimized as either model parameters or 2ndLevel/hyper
         # parameters (for various optional features). 
         # This should be called in the __init__() function implementation for convenience if possible.
-        pass
+        # The leadfield matrix 'lm' is handled specially below: its value is stored as a Parameter (containing variance information)
+        param_reg = []
+        param_hyper = []
+
+        var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
+        for var_name in var_names:
+            var = getattr(self.params, var_name)
+            if (var.fit_hyper):
+                if var_name == 'lm':
+                    size = var.prior_var.shape
+                    var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what a user would expect when giving a variance
+                    param_hyper.append(var.prior_mean)
+                    param_hyper.append(var.prior_var)
+                elif (var_name != 'std_in'):
+                    var.randSet() #TODO: This should be done before giving params to model class
+                    param_hyper.append(var.prior_mean)
+                    param_hyper.append(var.prior_var)
+
+            if (var.fit_par):
+                param_reg.append(var.val) #TODO: This should go before fit_hyper, but need to change where randomness gets added in the code first
+                
+            if (var.fit_par | var.fit_hyper):
+                self.track_params.append(var_name) #NMM Parameters
+            
+            if var_name == 'lm':
+                setattr(self, var_name, var.val)
+            
+        self.params_fitted = {'modelparameter': param_reg, 'hyperparameter': param_hyper}
         
     def createIC(self, ver):
         # Create the initial conditions for the model state variables. 

From aa508830746ff22e4dee81afbcfd1659916c6f99 Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 16:27:03 +0000
Subject: [PATCH 2/9] move common methods from each model to Abstract

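Model constructors now take the params object first, expose the
remaining arguments as keyword defaults, and split the structural-
connectivity gains out into setModelSCParameters(). A sketch of
constructing the RWW model after this change (the keyword values are
simply the new defaults, written out for illustration; params is a
ParamsRWW built elsewhere):

    import numpy as np
    from whobpyt.models.RWW.wong_wang import RNNRWW

    model = RNNRWW(params, node_size=68, TRs_per_window=20,
                   step_size=0.05, tr=1.0, sc=np.ones((68, 68)),
                   use_fit_gains=True)
    # __init__ runs setModelParameters() (the common loop inherited
    # from AbstractNMM) and setModelSCParameters() (the w_ll gains).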
---
 whobpyt/models/BOLD/BOLD.py                |   7 +-
 whobpyt/models/EEG/EEG.py                  |  63 ++-
 whobpyt/models/EEG/ParamsEEG.py            |   9 +
 whobpyt/models/JansenRit/jansen_rit.py     |  73 +--
 whobpyt/models/RWW/wong_wang.py            | 544 +++++++--------------
 whobpyt/models/RWWEI2/Multimodal_RWWEI2.py | 104 ++--
 whobpyt/models/RWWEI2/RWWEI2.py            |   9 +-
 7 files changed, 305 insertions(+), 504 deletions(-)

diff --git a/whobpyt/models/BOLD/BOLD.py b/whobpyt/models/BOLD/BOLD.py
index 4ec143da..86594fa9 100644
--- a/whobpyt/models/BOLD/BOLD.py
+++ b/whobpyt/models/BOLD/BOLD.py
@@ -1,7 +1,7 @@
 import torch
 from whobpyt.datatypes import AbstractMode
    
-class BOLD_Layer(AbstractMode):
+class BOLD_Layer(torch.nn.Module):
     '''
     Balloon-Windkessel Hemodynamic Response Function Forward Model
     
@@ -29,7 +29,10 @@ def __init__(self, num_regions, params, useBC = False, device = torch.device('cp
         self.device = device
         
         self.num_blocks = 1
-        
+        self.params_fitted = {}
+        self.params_fitted['modelparameter'] = []
+        self.params_fitted['hyperparameter'] = []
+        self.track_params = []
         self.params = params
         
         self.setModelParameters()
diff --git a/whobpyt/models/EEG/EEG.py b/whobpyt/models/EEG/EEG.py
index 410b1164..78c994d6 100644
--- a/whobpyt/models/EEG/EEG.py
+++ b/whobpyt/models/EEG/EEG.py
@@ -1,7 +1,7 @@
 import torch
 from whobpyt.datatypes import AbstractMode
 
-class EEG_Layer(AbstractMode):
+class EEG_Layer(torch.nn.Module):
     '''
     
     Lead Field Matrix multiplication which converts Source Space EEG to Channel Space EEG
@@ -21,7 +21,11 @@ def __init__(self, num_regions, params, num_channels, device = torch.device('cpu
         
         self.num_regions = num_regions
         self.num_channels = num_channels
+        self.params_fitted = {}
+        self.params_fitted['modelparameter'] = []
+        self.params_fitted['hyperparameter'] = []
         
+        self.track_params = []
         self.device = device
         
         self.num_blocks = 1
@@ -34,44 +38,39 @@ def info(self):
         return {"state_names": ["None"], "output_name": "eeg"}
             
     def setModelParameters(self):
-        return setModelParameters(self)
+        self.LF = self.params.LF.to(self.device)
         
     def createIC(self, ver):
         pass
     
     def forward(self, step_size, sim_len, node_history, device = torch.device('cpu')):
-        return forward(self, step_size, sim_len, node_history)   
-
-def setModelParameters(self):
-    #############################################
-    ## EEG Lead Field
-    #############################################
-    
-    self.LF = self.params.LF.to(self.device)
-    
-def forward(self, step_size, sim_len, node_history):
+        hE = torch.tensor(1.0).to(self.device) #Dummy variable
     
-    hE = torch.tensor(1.0).to(self.device) #Dummy variable
-    
-    # Runs the EEG Model
-    #
-    # INPUT
-    #  step_size: Float - The step size in msec which must match node_history step size.
-    #  sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. 
-    #  node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM
-    #  device: torch.device - Whether to run on GPU or CPU - default is CPU and GPU has not been tested for Network_NMM code
-    #
-    # OUTPUT
-    #  layer_history: Tensor - [time_steps, regions, num_blocks]
-    #
+        # Runs the EEG Model
+        #
+        # INPUT
+        #  step_size: Float - The step size in msec which must match node_history step size.
+        #  sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. 
+        #  node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM
+        #  device: torch.device - Whether to run on GPU or CPU - default is CPU and GPU has not been tested for Network_NMM code
+        #
+        # OUTPUT
+        #  layer_history: Tensor - [time_steps, regions, num_blocks]
+        #
+        
+        layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device)
+        # layer_hist collects channel-space EEG: [time_steps, channels, blocks]
+        num_steps = int((sim_len/step_size)/self.num_blocks)
+        for i in range(num_steps):
+            layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF
+        
+        sim_vals = {}
+        sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch
     
-    layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device)
+        return sim_vals, hE   
+
 
-    num_steps = int((sim_len/step_size)/self.num_blocks)
-    for i in range(num_steps):
-        layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF
     
-    sim_vals = {}
-    sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch
 
-    return sim_vals, hE
\ No newline at end of file
+    
+    
\ No newline at end of file
diff --git a/whobpyt/models/EEG/ParamsEEG.py b/whobpyt/models/EEG/ParamsEEG.py
index fe80b1e1..51b29e31 100644
--- a/whobpyt/models/EEG/ParamsEEG.py
+++ b/whobpyt/models/EEG/ParamsEEG.py
@@ -9,4 +9,13 @@ def __init__(self, Lead_Field):
         #############################################
         
         self.LF = Lead_Field # This should be [num_regions, num_channels]
+        
+    def to(self, device):
+        # Moves all `par` parameters to the given device (CPU or GPU)
+        
+        vars_names = [a for a in dir(self) if not a.startswith('__')]
+        for var_name in vars_names:
+            var = getattr(self, var_name)
+            if (type(var) == par):
+                var.to(device)
         
\ No newline at end of file
diff --git a/whobpyt/models/JansenRit/jansen_rit.py b/whobpyt/models/JansenRit/jansen_rit.py
index 65975c96..da7f6e47 100644
--- a/whobpyt/models/JansenRit/jansen_rit.py
+++ b/whobpyt/models/JansenRit/jansen_rit.py
@@ -87,9 +87,10 @@ class RNNJANSEN(AbstractNMM):
     
     """
 
-    def __init__(self, node_size: int,
-                 TRs_per_window: int, step_size: float, output_size: int, tr: float, sc: np.ndarray, lm: np.ndarray, dist: np.ndarray,
-                 use_fit_gains: bool, use_fit_lfm: bool, params: ParamsJR) -> None:               
+    def __init__(self, params: ParamsJR, node_size=200,
+                 TRs_per_window=20, step_size=0.0001, output_size=64, tr=0.001,
+                 sc=np.ones((200,200)), lm=np.ones((64,200)), dist=np.ones((200,200)),
+                 use_fit_gains=True, use_fit_lfm=False):
         """
         Parameters
         ----------
@@ -118,7 +119,7 @@ def __init__(self, node_size: int,
         """
         method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types
         
-        super(RNNJANSEN, self).__init__()
+        super(RNNJANSEN, self).__init__(params)
         self.state_names = ['E', 'Ev', 'I', 'Iv', 'P', 'Pv']
         self.output_names = ["eeg"]
         self.track_params = [] #Is populated during setModelParameters()
@@ -140,18 +141,9 @@ def __init__(self, node_size: int,
         self.output_size = lm.shape[0]  # number of EEG channels
         
         self.setModelParameters()
+        self.setModelSCParameters()
 
-    def info(self):
-        # TODO: Make sure this method is useful
-        """
-        Returns a dictionary with the names of the states and the output.
-
-        Returns
-        -------
-        Dict[str, List[str]]
-        """
-
-        return {"state_names": ['E', 'Ev', 'I', 'Iv', 'P', 'Pv'], "output_names": ["eeg"]}
+    
     
     def createIC(self, ver):
         """
@@ -195,14 +187,11 @@ def createDelayIC(self, ver):
 
         return torch.tensor(np.random.uniform(state_lb, state_ub, (self.node_size, delays_max)), dtype=torch.float32)
     
-    def setModelParameters(self):
+    def setModelSCParameters(self):
         """
         Sets the parameters of the model.
         """
-
-        param_reg = []
-        param_hyper = []
-
+        
         # Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True
         if self.use_fit_gains:
             self.w_bb = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the backwards gains
@@ -211,45 +200,15 @@ def setModelParameters(self):
                                                 dtype=torch.float32))
             self.w_ll = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the lateral gains
                                                 dtype=torch.float32))
-            param_reg.append(self.w_ll)
-            param_reg.append(self.w_ff)
-            param_reg.append(self.w_bb)
+            self.params_fitted['modelparameter'].append(self.w_ll)
+            self.params_fitted['modelparameter'].append(self.w_ff)
+            self.params_fitted['modelparameter'].append(self.w_bb)
         else:
             self.w_bb = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
             self.w_ff = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
             self.w_ll = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
 
-        # If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information)
-        if self.use_fit_lfm:
-            self.lm = Parameter(torch.tensor(self.lm, dtype=torch.float32))  # leadfield matrix from sourced data to eeg
-            param_reg.append(self.lm)
-        else:
-            self.lm = torch.tensor(self.lm, dtype=torch.float32)
-
-        var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
-        for var_name in var_names:
-            var = getattr(self.params, var_name)
-            if (var.fit_hyper):
-                if var_name == 'lm':
-                    size = var.prior_var.shape
-                    var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance 
-                    param_hyper.append(var.prior_mean)
-                    param_hyper.append(var.prior_var)
-                elif (var != 'std_in'):
-                    var.randSet() #TODO: This should be done before giving params to model class
-                    param_hyper.append(var.prior_mean)
-                    param_hyper.append(var.prior_var)
-
-            if (var.fit_par):
-                param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first 
-                
-            if (var.fit_par | var.fit_hyper):
-                self.track_params.append(var_name) #NMM Parameters
-            
-            if var_name == 'lm':
-                setattr(self, var_name, var.val)
-            
-        self.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper}
+        
 
 
     def forward(self, external, hx, hE):
@@ -313,6 +272,8 @@ def forward(self, external, hx, hE):
         
         g_f = (lb * con_1 + m(self.params.g_f.value()))
         g_b = (lb * con_1 + m(self.params.g_b.value()))
+        
+        lm = self.params.lm.value()
 
 
         next_state = {}
@@ -424,9 +385,9 @@ def forward(self, external, hx, hE):
             hE = torch.cat([M, hE[:, :-1]], dim=1)  # update placeholders for pyramidal buffer
 
             # Capture the states at every tr in the placeholders which is then used in the cost calculation.
-            lm_t = (self.lm.T / torch.sqrt(self.lm ** 2).sum(1)).T
+            lm_t = (lm.T / torch.sqrt(lm ** 2).sum(1)).T
             self.lm_t = (lm_t - 1 / self.output_size * torch.matmul(torch.ones((1, self.output_size)), lm_t))
-            temp = cy0 * torch.matmul(self.lm_t, M[:200, :]) - 1 * y0
+            temp = cy0 * torch.matmul(self.lm_t, E-I) - 1 * y0
             eeg_window.append(temp)
 
         # Update the current state.
diff --git a/whobpyt/models/RWW/wong_wang.py b/whobpyt/models/RWW/wong_wang.py
index bfe3ab07..93270ac4 100644
--- a/whobpyt/models/RWW/wong_wang.py
+++ b/whobpyt/models/RWW/wong_wang.py
@@ -25,12 +25,16 @@ class RNNRWW(AbstractNMM):
     Attributes
     ---------
     
-    state_names: list
-        A list of model state variable names
+    state_names: list
+        A list of model state variable names
+    pop_names: list
+        A list of population names
     output_names: list
         A list of model output variable names
     model_name: string
         The name of the model itself
+    pop_size : int
+        The number of populations in the WWD model (just one in this model)
     state_size : int
         The number of states in the WWD model
     tr : float
@@ -43,22 +47,14 @@ class RNNRWW(AbstractNMM):
         The number of BOLD TRs to simulate in one forward call
     node_size: int
         The number of ROIs
-    sampling_size: int
-        This is related to an averaging of NMM values before inputing into hemodynamic equaitons. This is non-standard.        
+         
     sc: float node_size x node_size array
         The structural connectivity matrix
     sc_fitted: bool
         The fitted structural connectivity
     use_fit_gains: tensor with node_size x node_size (grad on depends on fit_gains)
         Whether to fit the structural connectivity, will fit via connection gains: exp(gains_con)*sc
-    use_Laplacian: bool
-        Whether to use the negative laplacian of the (fitted) structural connectivity as the structural connectivity
-    use_Bifurcation: bool
-        Use a custom objective function component
-    use_Gaussian_EI: bool
-        Use a custom objective function component
-    use_dynamic_boundary: bool
-        Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size. 
+    
     params: ParamsRWW
         A object that contains the parameters for the RWW nodes
     params_fitted: dictionary
@@ -92,13 +88,10 @@ class RNNRWW(AbstractNMM):
         std for state noise and output noise
 
     """
-    use_fit_lfm = False
-    input_size = 2
+    
 
-    def __init__(self, node_size: int,
-                 TRs_per_window: int, step_size: float, sampling_size: float, tr: float, sc: float, use_fit_gains: bool,
-                 params: ParamsRWW, use_Bifurcation: bool = True, use_Gaussian_EI: bool = False, use_Laplacian: bool = True,
-                 use_dynamic_boundary: bool = True) -> None:
+    def __init__(self, params: ParamsRWW, node_size = 68, TRs_per_window = 20, step_size = 0.05,
+                 tr = 1.0, sc = np.ones((68,68)), use_fit_gains = True):
         """
         Parameters
         ----------
@@ -109,8 +102,6 @@ def __init__(self, node_size: int,
             The number of BOLD TRs to simulate in one forward call    
         step_size: float
             Integration step for forward model
-        sampling_size:
-            This is related to an averaging of NMM values before inputing into hemodynamic equaitons. This is non-standard. 
         tr : float
             tr of fMRI image. That is, the spacing betweeen images in the time series. 
         sc: float node_size x node_size array
@@ -118,23 +109,14 @@ def __init__(self, node_size: int,
         use_fit_gains: bool
             Whether to fit the structural connectivity, will fit via connection gains: exp(gains_con)*sc
         params: ParamsRWW
-            A object that contains the parameters for the RWW nodes
-        use_Bifurcation: bool
-            Use a custom objective function component
-        use_Gaussian_EI: bool
-            Use a custom objective function component
-        use_Laplacian: bool
-            Whether to use the negative laplacian of the (fitted) structural connectivity as the structural connectivity
-        use_dynamic_boundary: bool
-            Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size.
+            An object that contains the parameters for the RWW nodes.
         """        
         method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types
         
-        super(RNNRWW, self).__init__()
+        super(RNNRWW, self).__init__(params)
         
         self.state_names = ['E', 'I', 'x', 'f', 'v', 'q']
         self.output_names = ["bold"]
-        self.track_params = [] #Is populated during setModelParameters()
         
         self.model_name = "RWW"
         self.state_size = 6  # 6 states WWD model
@@ -144,42 +126,16 @@ def __init__(self, node_size: int,
         self.steps_per_TR = int(tr / step_size)
         self.TRs_per_window = TRs_per_window  # size of the batch used at each step
         self.node_size = node_size  # num of ROI
-        self.sampling_size = sampling_size
         self.sc = sc  # matrix node_size x node_size structure connectivity
         self.sc_fitted = torch.tensor(sc, dtype=torch.float32)  # placeholder
         self.use_fit_gains = use_fit_gains  # flag for fitting gains
-        self.use_Laplacian = use_Laplacian
-        self.use_Bifurcation = use_Bifurcation
-        self.use_Gaussian_EI = use_Gaussian_EI
-        self.use_dynamic_boundary = use_dynamic_boundary
-        self.params = params
-        self.params_fitted = {}
-
+        
         self.output_size = node_size
         
         self.setModelParameters()
+        self.setModelSCParameters()
     
-    def info(self):
-        """
-        
-        A function that returns a dictionary with model information.
-        
-        Parameters
-        ----------
-        
-        None
-        
-        
-        Returns
-        ----------
-        
-        Dictionary of Lists
-            The List contain State Names and Output Names 
-        
-        
-        """
     
-        return {"state_names": ['E', 'I', 'x', 'f', 'v', 'q'], "output_names": ["bold"]}
     
     def createIC(self, ver):
         """
@@ -206,33 +162,45 @@ def createIC(self, ver):
         return torch.tensor(0.2 * np.random.uniform(0, 1, (self.node_size, self.state_size)) + np.array(
                 [0, 0, 0, 1.0, 1.0, 1.0]), dtype=torch.float32)
 
-    def setModelParameters(self):
+    
+    def createDelayIC(self, ver):
         """
-        
-        A function that assigns model parameters as model attributes and also to assign parameters and hyperparameters for fitting, 
-        so that the inherited Torch functionality can be used. 
-        This practice may be replaced soon. 
+        Creates the initial conditions for the delays.
 
-        
         Parameters
         ----------
-        
-        None
-        
-        
+        ver : int
+            Initial condition version. (unused in this model; kept for consistency with other models)
+
         Returns
-        ----------
+        -------
+        torch.Tensor
+            Tensor of shape (node_size, delays_max) with random values between `state_lb` and `state_ub`.
+        """
+
+        delays_max = 500
+        state_ub = 0.5
+        state_lb = 0.1
+
+        return torch.tensor(np.random.uniform(state_lb, state_ub, (self.node_size, delays_max)), dtype=torch.float32)
+    
+    def setModelSCParameters(self):
         
-        Dictionary of Lists
-            Keys are State Names and Output Names (with _window appended to the name)
-            Contents are the time series from model simulation
+        """
+        Sets the structural connectivity gain parameters of the model.
+        """
+
         
-        """    
-    
-    
-        # set states E I f v mean and 1/sqrt(variance)
-        return setModelParameters(self)
 
+        # Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True
+        if self.use_fit_gains:
+            
+            self.w_ll = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the lateral gains
+                                                dtype=torch.float32))
+            self.params_fitted['modelparameter'].append(self.w_ll)
+        else:
+            self.w_ll = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
+        
     def forward(self, external, hx, hE):
         """
         
@@ -240,7 +208,7 @@ def forward(self, external, hx, hE):
         
         Parameters
         ----------
-        external: tensor with node_size x steps_per_TR x TRs_per_window x input_size
+        external: tensor with node_size x pop_size x steps_per_TR x TRs_per_window x input_size
             noise for states
         
         hx: tensor with node_size x state_size
@@ -251,198 +219,108 @@ def forward(self, external, hx, hE):
         next_state: dictionary with Tensors
             Tensor dimension [Num_Time_Points, Num_Regions]
         
-        with keys: 'current_state''bold_window''E_window''I_window''x_window''f_window''v_window''q_window'
+        with keys: 'current_state', 'states_window', 'bold_window'
             record new states and BOLD
             
         """
-        
-        return integration_forward(self, external, hx, hE)
-
-def h_tf(a, b, d, z):
-    """
-    Neuronal input-output functions of excitatory pools and inhibitory pools.
-    Take the variables a, x, and b and convert them to a linear equation (a*x - b) while adding a small
-    amount of noise 0.00001 while dividing that term to an exponential of the linear equation multiplied by the
-    d constant for the appropriate dimensions.
-    """
-    num = 0.00001 + torch.abs(a * z - b)
-    den = 0.00001 * d + torch.abs(1.0000 - torch.exp(-d * (a * z - b)))
-    return torch.divide(num, den)
-
-def setModelParameters(model):
-    param_reg = [] #NMM Equation Parameters
-    param_hyper = [] #Mean and Variance of NMM Equation Parameters, and others
+        # Generate the ReLU module for model parameters gEE gEI and gIE
+        m = torch.nn.ReLU()
     
-    if model.use_Gaussian_EI:
-        model.E_m = Parameter(torch.tensor(0.16, dtype=torch.float32))
-        param_hyper.append(model.E_m)
-        model.I_m = Parameter(torch.tensor(0.1, dtype=torch.float32))
-        param_hyper.append(model.I_m)
-        # model.f_m = Parameter(torch.tensor(1.0, dtype=torch.float32))
-        model.v_m = Parameter(torch.tensor(1.0, dtype=torch.float32))
-        param_hyper.append(model.v_m)
-        # model.x_m = Parameter(torch.tensor(0.16, dtype=torch.float32))
-        model.q_m = Parameter(torch.tensor(1.0, dtype=torch.float32))
-        param_hyper.append(model.q_m)
-
-        model.E_v_inv = Parameter(torch.tensor(2500, dtype=torch.float32))
-        param_hyper.append(model.E_v_inv)
-        model.I_v_inv = Parameter(torch.tensor(2500, dtype=torch.float32))
-        param_hyper.append(model.I_v_inv)
-        # model.f_v = Parameter(torch.tensor(100, dtype=torch.float32))
-        model.v_v_inv = Parameter(torch.tensor(100, dtype=torch.float32))
-        param_hyper.append(model.v_v_inv)
-        # model.x_v = Parameter(torch.tensor(100, dtype=torch.float32))
-        model.q_v_inv = Parameter(torch.tensor(100, dtype=torch.float32))
-        param_hyper.append(model.v_v_inv)
-
-    # hyper parameters (variables: need to calculate gradient) to fit density
-    # of gEI and gIE (the shape from the bifurcation analysis on an isolated node)
-    if model.use_Bifurcation:
-        model.sup_ca = Parameter(torch.tensor(0.5, dtype=torch.float32))
-        param_hyper.append(model.sup_ca)
-        model.sup_cb = Parameter(torch.tensor(20, dtype=torch.float32))
-        param_hyper.append(model.sup_cb)
-        model.sup_cc = Parameter(torch.tensor(10, dtype=torch.float32))
-        param_hyper.append(model.sup_cc)
-
-    # set gains_con as Parameter if fit_gain is True
-    if model.use_fit_gains:
-        model.gains_con = Parameter(torch.tensor(np.zeros((model.node_size, model.node_size)) + 0.05,
-                                                 dtype=torch.float32))  # connenction gain to modify empirical sc
-        param_reg.append(model.gains_con)
-    else:
-        model.gains_con = torch.tensor(np.zeros((model.node_size, model.node_size)), dtype=torch.float32)
-
-    var_names = [a for a in dir(model.params) if not a.startswith('__')]
-    for var_name in var_names:
-        var = getattr(model.params, var_name)
-        if (type(var) == par): 
-            if (var.fit_hyper == True):
-                var.randSet() #TODO: This should be done before giving params to model class
-                param_hyper.append(var.prior_mean)
-                param_hyper.append(var.prior_var) #TODO: Currently this is _v_inv but should set everything to just variance unless there is a reason to keep the inverse?
-                
-            if (var.fit_par == True):
-                param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first                
-                model.track_params.append(var_name)
-            
-            if (var.fit_par | var.fit_hyper):
-                model.track_params.append(var_name) #NMM Parameters
-
-    model.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper}
-
-def integration_forward(model, external, hx, hE):
-
-    # Generate the ReLU module for model parameters gEE gEI and gIE
-    m = torch.nn.ReLU()
-
-
-    # Defining NMM Parameters to simplify later equations
-    std_in = (0.02 + m(model.params.std_in.value()))  # standard deviation of the Gaussian noise
-    std_out = (0.00 + m(model.params.std_out.value()))  # standard deviation of the Gaussian noise
     
-    # Parameters for the ODEs
-    # Excitatory population
-    W_E = model.params.W_E.value()  # scale of the external input
-    tau_E = model.params.tau_E.value()  # decay time
-    gamma_E = model.params.gamma_E.value()  # other dynamic parameter (?)
-
-    # Inhibitory population
-    W_I = model.params.W_I.value()  # scale of the external input
-    tau_I = model.params.tau_I.value()  # decay time
-    gamma_I = model.params.gamma_I.value()  # other dynamic parameter (?)
-
-    # External input
-    I_0 = model.params.I_0.value()  # external input
-    I_external = model.params.I_external.value()  # external stimulation
-
-    # Coupling parameters
-    g = model.params.g.value()  # global coupling (from all nodes E_j to single node E_i)
-    g_EE = (0.001 + m(model.params.g_EE.value()))  # local self excitatory feedback (from E_i to E_i)
-    g_IE = (0.001 + m(model.params.g_IE.value()))  # local inhibitory coupling (from I_i to E_i)
-    g_EI = (0.001 + m(model.params.g_EI.value()))  # local excitatory coupling (from E_i to I_i)
-
-    aE = model.params.aE.value()
-    bE = model.params.bE.value()
-    dE = model.params.dE.value()
-    aI = model.params.aI.value()
-    bI = model.params.bI.value()
-    dI = model.params.dI.value()
-
-    # Output (BOLD signal)
-    alpha = model.params.alpha.value()
-    rho = model.params.rho.value()
-    k1 = model.params.k1.value()
-    k2 = model.params.k2.value()
-    k3 = model.params.k3.value()  # adjust this number from 0.48 for BOLD fluctruate around zero
-    V = model.params.V.value()
-    E0 = model.params.E0.value()
-    tau_s = model.params.tau_s.value()
-    tau_f = model.params.tau_f.value()
-    tau_0 = model.params.tau_0.value()
-    mu = model.params.mu.value()
-
-
-    next_state = {}
-
-    # hx is current state (6) 0: E 1:I (neural activities) 2:x 3:f 4:v 5:f (BOLD)
-
-    x = hx[:, 2:3]
-    f = hx[:, 3:4]
-    v = hx[:, 4:5]
-    q = hx[:, 5:6]
-
-    dt = torch.tensor(model.step_size, dtype=torch.float32)
-
-    # Update the Laplacian based on the updated connection gains gains_con.
-    if model.sc.shape[0] > 1:
-
+        # Defining NMM Parameters to simplify later equations
+        std_in = 0.02 + m(self.params.std_in.value())  # standard deviation of the Gaussian noise (0.02 is the lower bound)
+        
+        # Parameters for the ODEs
+        # Excitatory population
+        W_E = self.params.W_E.value()  # scale of the external input
+        tau_E = self.params.tau_E.value()  # decay time
+        gamma_E = self.params.gamma_E.value()  # other dynamic parameter (?)
+    
+        # Inhibitory population
+        W_I = self.params.W_I.value()  # scale of the external input
+        tau_I = self.params.tau_I.value()  # decay time
+        gamma_I = self.params.gamma_I.value()  # other dynamic parameter (?)
+    
+        # External input
+        I_0 = self.params.I_0.value()  # external input
+        I_external = self.params.I_external.value()  # external stimulation
+    
+        # Coupling parameters
+        g = self.params.g.value()  # global coupling (from all nodes E_j to single node E_i)
+        g_EE =  m(self.params.g_EE.value())  # local self excitatory feedback (from E_i to E_i)
+        g_IE = m(self.params.g_IE.value())  # local inhibitory coupling (from I_i to E_i)
+        g_EI = m(self.params.g_EI.value())  # local excitatory coupling (from E_i to I_i)
+    
+        aE = self.params.aE.value()
+        bE = self.params.bE.value()
+        dE = self.params.dE.value()
+        aI = self.params.aI.value()
+        bI = self.params.bI.value()
+        dI = self.params.dI.value()
+    
+        # Output (BOLD signal)
+        alpha = self.params.alpha.value()
+        rho = self.params.rho.value()
+        k1 = self.params.k1.value()
+        k2 = self.params.k2.value()
+        k3 = self.params.k3.value()  # adjust this number from 0.48 so that BOLD fluctuates around zero
+        V = self.params.V.value()
+        E0 = self.params.E0.value()
+        tau_s = self.params.tau_s.value()
+        tau_f = self.params.tau_f.value()
+        tau_0 = self.params.tau_0.value()
+        mu = self.params.mu.value()
+    
+    
+        next_state = {}
+    
+        # hx is the current state (6): 0:E 1:I (neural activities), 2:x 3:f 4:v 5:q (BOLD)
+    
+        x = hx[:,2:3]
+        f = hx[:,3:4]
+        v = hx[:,4:5]
+        q = hx[:,5:6]
+    
+        dt = torch.tensor(self.step_size, dtype=torch.float32)
+    
         # Update the Laplacian based on the updated connection gains gains_con.
-        sc_mod = torch.exp(model.gains_con) * torch.tensor(model.sc, dtype=torch.float32)
-        sc_mod_normalized = (0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) / torch.linalg.norm(
-            0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1)))
-        model.sc_fitted = sc_mod_normalized
-
-        if model.use_Laplacian:
+        if self.sc.shape[0] > 1:
+    
+            # Modulate the empirical SC by the fitted gains w_ll, then symmetrize and normalize.
+            sc_mod = torch.exp(self.w_ll) * torch.tensor(self.sc, dtype=torch.float32)
+            sc_mod_normalized = (0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) / torch.linalg.norm(
+                0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1)))
+            self.sc_fitted = sc_mod_normalized
+    
             lap_adj = -torch.diag(sc_mod_normalized.sum(1)) + sc_mod_normalized
+            
         else:
-            lap_adj = sc_mod_normalized
-
-    else:
-        lap_adj = torch.tensor(np.zeros((1, 1)), dtype=torch.float32)
-
-    # placeholder for the updated current state
-    current_state = torch.zeros_like(hx)
-
-    # placeholders for output BOLD, history of E I x f v and q
-    # placeholders for output BOLD, history of E I x f v and q
-    bold_window = torch.zeros((model.node_size, model.TRs_per_window))
-    # E_window = torch.zeros((model.node_size,model.TRs_per_window))
-    # I_window = torch.zeros((model.node_size,model.TRs_per_window))
-
-    x_window = torch.zeros((model.node_size, model.TRs_per_window))
-    f_window = torch.zeros((model.node_size, model.TRs_per_window))
-    v_window = torch.zeros((model.node_size, model.TRs_per_window))
-    q_window = torch.zeros((model.node_size, model.TRs_per_window))
-
-    E_hist = torch.zeros((model.node_size, model.TRs_per_window, model.steps_per_TR))
-    I_hist = torch.zeros((model.node_size, model.TRs_per_window, model.steps_per_TR))
-    E_mean = hx[:, 0:1]
-    I_mean = hx[:, 1:2]
-
-    # Use the forward model to get neural activity at ith element in the window.
-    if model.use_dynamic_boundary:
-        for TR_i in range(model.TRs_per_window):
+            lap_adj = torch.tensor(np.zeros((1, 1)), dtype=torch.float32)
+    
+        
+    
+        # placeholders for the output BOLD and the histories of E, I, x, f, v and q
+
+        bold_window = []
+        E_window = []
+        I_window = []
+        x_window = []
+        f_window = []
+        q_window = []
+        v_window = []
+    
+        
+        
+        E = hx[:,0:1]
+        I = hx[:,1:2]
+        #print(E.shape)
+        # Use the forward model to get neural activity at ith element in the window.
+        
+        for TR_i in range(self.TRs_per_window):
 
             # Since tr is about second we need to use a small step size like 0.05 to integrate the model states.
-            for step_i in range(model.steps_per_TR):
-                E = torch.zeros((model.node_size, model.sampling_size))
-                I = torch.zeros((model.node_size, model.sampling_size))
-                for sample_i in range(model.sampling_size):
-                    E[:, sample_i] = E_mean[:, 0] + 0.02 * torch.randn(model.node_size)
-                    I[:, sample_i] = I_mean[:, 0] + 0.001 * torch.randn(model.node_size)
-
+            for step_i in range(self.steps_per_TR):
+                
                 # Calculate the input recurrent.
                 IE = torch.tanh(m(W_E * I_0 + g_EE * E + g * torch.matmul(lap_adj, E) - g_IE * I))  # input currents for E
                 II = torch.tanh(m(W_I * I_0 + g_EI * E - I))  # input currents for I
@@ -453,9 +331,9 @@ def integration_forward(model, external, hx, hE):
                 
                 # Update the states by step-size 0.05.
                 E_next = E + dt * (-E * torch.reciprocal(tau_E) + gamma_E * (1. - E) * rE) \
-                         + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in  ### equlibrim point at E=(tau_E*gamma_E*rE)/(1+tau_E*gamma_E*rE)
+                         + torch.sqrt(dt) * torch.randn(self.node_size, 1) * std_in  
                 I_next = I + dt * (-I * torch.reciprocal(tau_I) + gamma_I * rI) \
-                         + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in
+                         + torch.sqrt(dt) * torch.randn(self.node_size, 1) * std_in
 
                 # Calculate the saturation for model states (for stability and gradient calculation).
 
@@ -463,118 +341,58 @@ def integration_forward(model, external, hx, hE):
                 E = torch.tanh(0.0000 + m(1.0 * E_next))
                 I = torch.tanh(0.0000 + m(1.0 * I_next))
 
-                I_mean = I.mean(1)[:, np.newaxis]
-                E_mean = E.mean(1)[:, np.newaxis]
-                I_mean[I_mean < 0.00001] = 0.00001
-                E_mean[E_mean < 0.00001] = 0.00001
-
-                E_hist[:, TR_i, step_i] = E_mean[:, 0]
-                I_hist[:, TR_i, step_i] = I_mean[:, 0]
-
-        for TR_i in range(model.TRs_per_window):
+                
 
-            for step_i in range(model.steps_per_TR):
-                x_next = x + 1 * dt * (1 * E_hist[:, TR_i, step_i][:, np.newaxis] - torch.reciprocal(tau_s) * x \
+                
+                x_next = x + 1 * dt * (1 * E - torch.reciprocal(tau_s) * x \
                          - torch.reciprocal(tau_f) * (f - 1))
                 f_next = f + 1 * dt * x
                 v_next = v + 1 * dt * (f - torch.pow(v, torch.reciprocal(alpha))) * torch.reciprocal(tau_0)
                 q_next = q + 1 * dt * (f * (1 - torch.pow(1 - rho, torch.reciprocal(f))) * torch.reciprocal(rho) \
                          - q * torch.pow(v, torch.reciprocal(alpha)) * torch.reciprocal(v)) * torch.reciprocal(tau_0)
-
+    
                 x = torch.tanh(x_next)
                 f = (1 + torch.tanh(f_next - 1))
                 v = (1 + torch.tanh(v_next - 1))
                 q = (1 + torch.tanh(q_next - 1))
-                
+            
             # Put x f v q from each tr to the placeholders for checking them visually.
-            x_window[:, TR_i] = x[:, 0]
-            f_window[:, TR_i] = f[:, 0]
-            v_window[:, TR_i] = v[:, 0]
-            q_window[:, TR_i] = q[:, 0]
+            E_window.append(E)
+            I_window.append(I)
+            x_window.append(x)
+            f_window.append(f)
+            v_window.append(v)
+            q_window.append(q)
 
             # Put the BOLD signal each tr to the placeholder being used in the cost calculation.
-            bold_window[:, TR_i] = (std_out * torch.randn(model.node_size, 1) +
+            bold_window.append((0.01 * torch.randn(self.node_size, 1) +
                                     100.0 * V * torch.reciprocal(E0) *
-                                    (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v)))[:, 0]
-    else:
-
-        for TR_i in range(model.TRs_per_window):
-
-            # Since tr is about second we need to use a small step size like 0.05 to integrate the model states.
-            for step_i in range(model.steps_per_TR):
-                E = torch.zeros((model.node_size, model.sampling_size))
-                I = torch.zeros((model.node_size, model.sampling_size))
-                for sample_i in range(model.sampling_size):
-                    E[:, sample_i] = E_mean[:, 0] + 0.001 * torch.randn(model.node_size)
-                    I[:, sample_i] = I_mean[:, 0] + 0.001 * torch.randn(model.node_size)
-
-                # Calculate the input recurrent.
-                IE = 1 * torch.tanh(m(W_E * I_0 + g_EE * E + g * torch.matmul(lap_adj, E) - g_IE * I))  # input currents for E
-                II = 1 * torch.tanh(m(W_I * I_0 + g_EI * E - I))  # input currents for I
-
-                # Calculate the firing rates.
-                rE = h_tf(aE, bE, dE, IE)  # firing rate for E
-                rI = h_tf(aI, bI, dI, II)  # firing rate for I
-                
-                # Update the states by step-size dt.
-                E_next = E + dt * (-E * torch.reciprocal(tau_E) + gamma_E * (1. - E) * rE) \
-                         + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in  ### equlibrim point at E=(tau_E*gamma_E*rE)/(1+tau_E*gamma_E*rE)
-                I_next = I + dt * (-I * torch.reciprocal(tau_I) + gamma_I * rI) \
-                         + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in
-
-                # Calculate the saturation for model states (for stability and gradient calculation).
-                E_next[E_next < 0.00001] = 0.00001
-                I_next[I_next < 0.00001] = 0.00001
-                # E_next[E_next>=0.9] = torch.tanh(1.6358*E_next[E_next>=0.9])
-                E = E_next  # torch.tanh(0.00001+m(1.0*E_next))
-                I = I_next  # torch.tanh(0.00001+m(1.0*I_next))
-
-                I_mean = I.mean(1)[:, np.newaxis]
-                E_mean = E.mean(1)[:, np.newaxis]
-                E_hist[:, TR_i, step_i] = torch.tanh(E_mean)[:, 0]
-                I_hist[:, TR_i, step_i] = torch.tanh(I_mean)[:, 0]
-
-            # E_window[:,TR_i]=E_mean[:,0]
-            # I_window[:,TR_i]=I_mean[:,0]
-
-        for TR_i in range(model.TRs_per_window):
-
-            for step_i in range(model.steps_per_TR):
-                x_next = x + 1 * dt * (1 * E_hist[:, TR_i, step_i][:, np.newaxis] - torch.reciprocal(tau_s) * x \
-                         - torch.reciprocal(tau_f) * (f - 1))
-                f_next = f + 1 * dt * x
-                v_next = v + 1 * dt * (f - torch.pow(v, torch.reciprocal(alpha))) * torch.reciprocal(tau_0)
-                q_next = q + 1 * dt * (f * (1 - torch.pow(1 - rho, torch.reciprocal(f))) * torch.reciprocal(rho) \
-                         - q * torch.pow(v, torch.reciprocal(alpha)) * torch.reciprocal(v)) * torch.reciprocal(tau_0)
-
-                f_next[f_next < 0.001] = 0.001
-                v_next[v_next < 0.001] = 0.001
-                q_next[q_next < 0.001] = 0.001
-                x = x_next  # torch.tanh(x_next)
-                f = f_next  # (1 + torch.tanh(f_next - 1))
-                v = v_next  # (1 + torch.tanh(v_next - 1))
-                q = q_next  # (1 + torch.tanh(q_next - 1))
-                
-            # Put x f v q from each tr to the placeholders for checking them visually.
-            x_window[:, TR_i] = x[:, 0]
-            f_window[:, TR_i] = f[:, 0]
-            v_window[:, TR_i] = v[:, 0]
-            q_window[:, TR_i] = q[:, 0]
-            
-            # Put the BOLD signal each tr to the placeholder being used in the cost calculation.
-            bold_window[:, TR_i] = (std_out * torch.randn(model.node_size, 1) +
-                                    100.0 * V * torch.reciprocal(E0) * 
-                                    (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v)))[:, 0]
-
-    # Update the current state.
-    current_state = torch.cat([E_mean, I_mean, x, f, v, q], dim=1)
-    next_state['current_state'] = current_state
-    next_state['bold'] = bold_window
-    next_state['E'] = E_hist.reshape((model.node_size, -1))
-    next_state['I'] = I_hist.reshape((model.node_size, -1))
-    next_state['x'] = x_window
-    next_state['f'] = f_window
-    next_state['v'] = v_window
-    next_state['q'] = q_window
+                                    (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v))))
+        
+    
+        # Update the current state.
+        current_state = torch.cat([E, I, x, f, v, q], dim=1)
+
+        next_state['current_state'] = current_state
+        next_state['bold'] = torch.cat(bold_window, dim=1)
+        next_state['E'] = torch.cat(E_window, dim=1)
+        next_state['I'] = torch.cat(I_window, dim=1)
+        next_state['x'] = torch.cat(x_window, dim=1)
+        next_state['f'] = torch.cat(f_window, dim=1)
+        next_state['v'] = torch.cat(v_window, dim=1)
+        next_state['q'] = torch.cat(q_window, dim=1)
+        
+        return next_state, hE
+        
+        
 
-    return next_state, hE
\ No newline at end of file
+def h_tf(a, b, d, z):
+    """
+    Neuronal input-output function of the excitatory and inhibitory pools.
+    Computes the transfer function (a*z - b) / (1 - exp(-d*(a*z - b))),
+    with a small constant (0.00001) added to the numerator and denominator
+    for numerical stability.
+    """
+    num = 0.00001 + torch.abs(a * z - b)
+    den = 0.00001 * d + torch.abs(1.0000 - torch.exp(-d * (a * z - b)))
+    return torch.divide(num, den)
\ No newline at end of file
diff --git a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
index 3b027d56..87c10a76 100644
--- a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
+++ b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
@@ -6,24 +6,26 @@
 from whobpyt.models.BOLD import BOLD_Layer, BOLD_Params
 from whobpyt.models.EEG import EEG_Layer, EEG_Params
 
-class RWWEI2_EEG_BOLD(RWWEI2):
+class RWWEI2_EEG_BOLD(torch.nn.Module):
 
     model_name = "RWWEI2_EEG_BOLD"
     
     def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD, Con_Mtx, dist_mtx, step_size, sim_len, device = torch.device('cpu')):
-
+        super(RWWEI2_EEG_BOLD, self).__init__() # To inherit the torch.nn.Module parameter machinery
         self.eeg = EEG_Layer(num_regions, paramsEEG, num_channels, device = device)
         self.bold = BOLD_Layer(num_regions, paramsBOLD, device = device)
-        super(RWWEI2_EEG_BOLD, self).__init__(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, useBC = False, device = device)
-        
+        self.NMM = RWWEI2(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, sim_len=sim_len, useBC = False, device = device)
+        self.params = paramsNode
         self.node_size = num_regions
         self.step_size = step_size
         self.sim_len = sim_len
-        
+        self.output_size = num_regions
         self.device = device
         
         self.batch_size = 1
-        
+        self.use_fit_gains = self.NMM.use_fit_gains  # flag for fitting gains
+        self.use_fit_lfm = self.NMM.use_fit_lfm
+        self.useBC = self.NMM.useBC
         self.tr = sim_len
         self.steps_per_TR = 1
         self.TRs_per_window = 1
@@ -31,64 +33,66 @@ def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD,
         self.state_names = ['E', 'I', 'x', 'f', 'v', 'q']
         self.output_names = ["bold", "eeg"]
         self.track_params = []  #Is populated during setModelParameters()
+        self.params_fitted = {}
+        self.params_fitted['modelparameter'] = []
+        self.params_fitted['hyperparameter'] = []
         
-        self.setModelParameters()
-
+        self.NMM.setModelParameters()
+        
+        self.eeg.setModelParameters()
+        self.bold.setModelParameters()
+        
+        self.params_fitted['modelparameter'] = self.NMM.params_fitted['modelparameter'] + self.eeg.params_fitted['modelparameter'] + self.bold.params_fitted['modelparameter']
+        self.params_fitted['hyperparameter'] = self.NMM.params_fitted['hyperparameter'] + self.eeg.params_fitted['hyperparameter'] + self.bold.params_fitted['hyperparameter']
+        self.track_params = self.NMM.track_params + self.eeg.track_params + self.bold.track_params
     def info(self):
         return {"state_names": ['E', 'I', 'x', 'f', 'v', 'q'], "output_names": ["bold", "eeg"]} #TODO: Update to take multiple output names
             
-    def setModelParameters(self):
-        return setModelParameters(self)
+    
         
     def createIC(self, ver):
-        return createIC(self, ver)
+        self.NMM.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) \
+            + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size)
+        self.NMM.next_start_state = self.NMM.next_start_state.to(self.device)
+    
+        return self.NMM.next_start_state
+        
+    def createDelayIC(self, ver):
+        # Creates a time series of state variables to represent their past values as needed when delays are used. 
+        
+        return torch.tensor(1.0) #Dummy variable if delays are not used
+    
         
     def setBlocks(self, num_blocks):
         self.num_blocks = num_blocks
         self.eeg.num_blocks = num_blocks
         self.bold.num_blocks = num_blocks
     
-    def forward(self, external, hx, hE, setNoise = None):
-        return forward(self, external, hx, hE, setNoise)
-
-def setModelParameters(self):
-    super(RWWEI2_EEG_BOLD, self).setModelParameters() # Currently this is the only one with parameters being fitted
-    self.eeg.setModelParameters()
-    self.bold.setModelParameters()    
-
-def createIC(self, ver):
-    #super(RWWEI2_EEG_BOLD, self).createIC()
-    #self.eeg.createIC()
-    #self.bold.createIC()
-    
-    self.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size)
-    self.next_start_state = self.next_start_state.to(self.device)
+    def forward(self, external, hx, hE, setNoise=None):
+        
+        NMM_vals, hE = self.NMM.forward(external, self.NMM.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future
+        # NMM_vals["E"]: [nodes, time, blocks]; permuted below to [time, nodes, blocks] for the EEG layer
+        EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
+        BOLD_vals, hE = self.bold.forward(self.NMM.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
+        
+        self.NMM.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach()
+        
+        sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals}
+        sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable
+        
+        # Reshape if Blocking is being Used
+        if self.NMM.num_blocks > 1:
+            for simKey in set(self.state_names + self.output_names):
+                #print(sim_vals[simKey].shape) # Nodes x Time x Blocks
+                #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks
+                sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0]
+        else:
+            for simKey in set(self.state_names + self.output_names):
+                sim_vals[simKey] = sim_vals[simKey][:,:,0]
+                
+        return sim_vals, hE
     
-    return self.next_start_state
     
-
-def forward(self, external, hx, hE, setNoise):
-    
-    NMM_vals, hE = super(RWWEI2_EEG_BOLD, self).forward(external, self.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future
-    EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
-    BOLD_vals, hE = self.bold.forward(self.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
-    
-    self.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach()
-    
-    sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals}
-    sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable
-    
-    # Reshape if Blocking is being Used
-    if self.num_blocks > 1:
-        for simKey in set(self.state_names + self.output_names):
-            #print(sim_vals[simKey].shape) # Nodes x Time x Blocks
-            #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks
-            sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0]
-    else:
-        for simKey in set(self.state_names + self.output_names):
-            sim_vals[simKey] = sim_vals[simKey][:,:,0]
-            
-    return sim_vals, hE
     
     
 def blockTS(data, blocks, numNodes, numSV):
diff --git a/whobpyt/models/RWWEI2/RWWEI2.py b/whobpyt/models/RWWEI2/RWWEI2.py
index de2fc22d..c677e7a7 100644
--- a/whobpyt/models/RWWEI2/RWWEI2.py
+++ b/whobpyt/models/RWWEI2/RWWEI2.py
@@ -1,8 +1,9 @@
 import torch
 from whobpyt.datatypes import AbstractNMM, AbstractParams, par
+from torch.nn.parameter import Parameter
 from math import sqrt
 
-class RWWEI2(AbstractNMM):
+class RWWEI2(torch.nn.Module):
     '''
     Reduced Wong Wang Excitatory Inhibatory (RWWEXcInh) Model - Version 2
     
@@ -120,6 +121,12 @@ def createIC(self, ver):
         
         return self.next_start_state
         
+    
+    
+    def createDelayIC(self, ver):
+        # Creates a time series of state variables to represent their past values as needed when delays are used. 
+        
+        return torch.tensor(1.0) #Dummy variable if delays are not used
     def setBlocks(self, num_blocks):
         self.num_blocks = num_blocks
     

From b749a3300142a7f048cd1557a91fd426addb6eac Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 16:34:12 +0000
Subject: [PATCH 3/9] modify cost for rww

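The use_Gaussian_EI and use_Bifurcation branches are removed from the
RWW cost, so the total reduces to the fit term plus one Gaussian-style
prior penalty per fitted parameter. The surviving penalty, written out
as a standalone sketch (m and lb mirror the names used inside loss();
the value of lb here is an assumption, as it is defined elsewhere in
the cost class):

    import torch

    m = torch.nn.ReLU()
    lb = 0.001  # assumed lower bound

    def prior_penalty(var):
        # one loss_prior term for a parameter with a prior
        return (torch.sum((lb + m(var.prior_var)) * (m(var.val) - m(var.prior_mean)) ** 2)
                + torch.sum(-torch.log(lb + m(var.prior_var))))

    # total: loss = w_cost * loss_main + sum(loss_prior)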
---
 whobpyt/optimization/custom_cost_RWW.py | 54 +++----------------------
 1 file changed, 5 insertions(+), 49 deletions(-)

diff --git a/whobpyt/optimization/custom_cost_RWW.py b/whobpyt/optimization/custom_cost_RWW.py
index 19ff4ed3..6dfbdcb3 100644
--- a/whobpyt/optimization/custom_cost_RWW.py
+++ b/whobpyt/optimization/custom_cost_RWW.py
@@ -49,42 +49,7 @@ def loss(self, simData: dict, empData: torch.Tensor):
         v_window = state_vals['v']
         x_window = state_vals['x']
         q_window = state_vals['q']
-        if model.use_Gaussian_EI and model.use_Bifurcation:
-            loss_EI = torch.mean(model.E_v_inv * (E_window - model.E_m) ** 2) \
-                      + torch.mean(-torch.log(model.E_v_inv)) + \
-                      torch.mean(model.I_v_inv * (I_window - model.I_m) ** 2) \
-                      + torch.mean(-torch.log(model.I_v_inv)) + \
-                      torch.mean(model.q_v_inv * (q_window - model.q_m) ** 2) \
-                      + torch.mean(-torch.log(model.q_v_inv)) + \
-                      torch.mean(model.v_v_inv * (v_window - model.v_m) ** 2) \
-                      + torch.mean(-torch.log(model.v_v_inv)) \
-                      + 5.0 * (m(model.sup_ca) * m(model.g_IE) ** 2
-                               - m(model.sup_cb) * m(model.params.g_IE.value())
-                               + m(model.sup_cc) - m(model.params.g_EI.value())) ** 2
-        if model.use_Gaussian_EI and not model.use_Bifurcation:
-            loss_EI = torch.mean(model.E_v_inv * (E_window - model.E_m) ** 2) \
-                      + torch.mean(-torch.log(model.E_v_inv)) + \
-                      torch.mean(model.I_v_inv * (I_window - model.I_m) ** 2) \
-                      + torch.mean(-torch.log(model.I_v_inv)) + \
-                      torch.mean(model.q_v_inv * (q_window - model.q_m) ** 2) \
-                      + torch.mean(-torch.log(model.q_v_inv)) + \
-                      torch.mean(model.v_v_inv * (v_window - model.v_m) ** 2) \
-                      + torch.mean(-torch.log(model.v_v_inv))
-
-        if not model.use_Gaussian_EI and model.use_Bifurcation:
-            loss_EI = .1 * torch.mean(
-                torch.mean(E_window * torch.log(E_window) + (1 - E_window) * torch.log(1 - E_window) \
-                           + 0.5 * I_window * torch.log(I_window) + 0.5 * (1 - I_window) * torch.log(
-                    1 - I_window), dim=1)) + \
-                      + 5.0 * (m(model.sup_ca) * m(model.params.g_IE.value()) ** 2
-                               - m(model.sup_cb) * m(model.params.g_IE.value())
-                               + m(model.sup_cc) - m(model.params.g_EI.value())) ** 2
-
-        if not model.use_Gaussian_EI and not model.use_Bifurcation:
-            loss_EI = .1 * torch.mean(
-                torch.mean(E_window * torch.log(E_window) + (1 - E_window) * torch.log(1 - E_window) \
-                           + 0.5 * I_window * torch.log(I_window) + 0.5 * (1 - I_window) * torch.log(
-                    1 - I_window), dim=1))
+        
 
         loss_prior = []
 
@@ -93,19 +58,10 @@ def loss(self, simData: dict, empData: torch.Tensor):
         for var_name in variables_p:
             # print(var)
             var = getattr(model.params, var_name)
-            if model.use_Bifurcation:
-                if var.has_prior and var_name not in ['std_in', 'g_EI', 'g_IE'] and \
-                        var_name not in exclude_param:
-                    loss_prior.append(torch.sum((lb + m(var.prior_var)) * \
-                                                (m(var.val) - m(var.prior_mean)) ** 2) \
-                                      + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation
-            else:
-                if var.has_prior and var_name not in ['std_in'] and \
-                        var_name not in exclude_param:
-                    loss_prior.append(torch.sum((lb + m(var.prior_var)) * \
-                                                (m(var.val) - m(var.prior_mean)) ** 2) \
-                                      + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation
+            if var.has_prior and var_name not in ['std_in'] and var_name not in exclude_param:
+                loss_prior.append(torch.sum((lb + m(var.prior_var)) * (m(var.val) - m(var.prior_mean)) ** 2) \
+                + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation
           
         # total loss
-        loss = w_cost * loss_main + sum(loss_prior) + 1 * loss_EI
+        loss = w_cost * loss_main + sum(loss_prior) 
         return loss

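The simplified prior term above now applies the same Gaussian penalty to every parameter with a prior, regardless of the bifurcation setting. A minimal sketch of the consolidated computation, assuming each par object exposes .has_prior, .val, .prior_mean, and .prior_var; the identity transform and lb value here are stand-ins for the cost class's own m and lower bound:

    import torch

    def prior_loss(named_pars, exclude=('std_in',), lb=0.001, m=lambda t: t):
        # Gaussian prior penalty plus the log-variance normalization term
        terms = []
        for name, var in named_pars:
            if var.has_prior and name not in exclude:
                terms.append(torch.sum((lb + m(var.prior_var)) * (m(var.val) - m(var.prior_mean)) ** 2)
                             + torch.sum(-torch.log(lb + m(var.prior_var))))
        return sum(terms)
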
From 82bd0731b67acebaf495731a5bc4fcadbe094512 Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 16:34:44 +0000
Subject: [PATCH 4/9] modify model fitting

---
 whobpyt/run/customfitting.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/whobpyt/run/customfitting.py b/whobpyt/run/customfitting.py
index 77e8164b..d9b40fd9 100644
--- a/whobpyt/run/customfitting.py
+++ b/whobpyt/run/customfitting.py
@@ -91,7 +91,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                     # initial history of E
                     delayHist = self.model.createDelayIC(ver = 0) #TODO: Delays are currently not implemented in various places
                 else:
-                    firstIC = self.model.next_start_state
+                    firstIC = self.model.NMM.next_start_state
                     delayHist = torch.tensor(1.0, device = self.device) # TODO: Delays are currently not implemented in various places
 
                 # initialize the external inputs
@@ -100,7 +100,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 num_blocks = int(self.model.sim_len/block_len)
             
                 ## Noise for the epoch (which has 1 batch)
-                [serialNoise, blockNoise] = self.model.genNoise(block_len)
+                [serialNoise, blockNoise] = self.model.NMM.genNoise(block_len)
 
                 with torch.no_grad():
                     sim_vals, delayHist = self.model.forward(external, firstIC, delayHist, serialNoise)
@@ -127,9 +127,9 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 nextICs = ICs[:,:,[-1]] #Brackets are to keep the dimension
 
                 self.model.setBlocks(num_blocks)
-                self.model.next_start_state = newICs
+                self.model.NMM.next_start_state = newICs
                 sim_vals, delayHist = self.model.forward(external, newICs, delayHist, blockNoise)
-                self.model.next_start_state = nextICs #TODO: Update to deal with delays as well
+                self.model.NMM.next_start_state = nextICs #TODO: Update to deal with delays as well
                 
                 print("Blocked Finished")
                 

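The bracketed index in ICs[:, :, [-1]] above is what preserves the trailing block dimension; plain integer indexing would drop it. A quick, self-contained illustration:

    import torch

    ICs = torch.zeros(4, 6, 8)       # Nodes x StateVars x Blocks
    print(ICs[:, :, -1].shape)       # torch.Size([4, 6])    - dimension dropped
    print(ICs[:, :, [-1]].shape)     # torch.Size([4, 6, 1]) - dimension kept, as next_start_state needs
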
From 3ee0ccfc33b92ab77957922e25327276d959cd3a Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 17:10:25 +0000
Subject: [PATCH 5/9] modify examples

---
 examples/eg002r__multimodal_simulation.py | 5 ++++-
 examples/eg003r__fitting_rww_example.py   | 2 +-
 examples/eg004r__fitting_JR_example.py    | 2 +-
 examples/eg005r__gpu_support.py           | 5 ++++-
 examples/eg006r__replicate_Momi2023.py    | 4 +++-
 5 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/examples/eg002r__multimodal_simulation.py b/examples/eg002r__multimodal_simulation.py
index 89523b1f..08b73490 100644
--- a/examples/eg002r__multimodal_simulation.py
+++ b/examples/eg002r__multimodal_simulation.py
@@ -20,7 +20,10 @@
 # Importage
 # ---------------------------------------------------
 #
-
+# os stuff
+import os
+import sys
+sys.path.append('..')
 # whobpyt stuff
 import whobpyt
 from whobpyt.datatypes import par, Recording
diff --git a/examples/eg003r__fitting_rww_example.py b/examples/eg003r__fitting_rww_example.py
index 67bdad7d..0ae4a5fa 100644
--- a/examples/eg003r__fitting_rww_example.py
+++ b/examples/eg003r__fitting_rww_example.py
@@ -110,7 +110,7 @@
 
 # %%
 # call the model we want to fit
-model = RNNRWW(node_size, TPperWindow, step_size, repeat_size, tr, sc, True, params)
+model = RNNRWW(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, tr=tr, sc=sc, use_fit_gains=True)
 
 # %%
 # create objective function
diff --git a/examples/eg004r__fitting_JR_example.py b/examples/eg004r__fitting_JR_example.py
index d8a43f53..2db38077 100644
--- a/examples/eg004r__fitting_JR_example.py
+++ b/examples/eg004r__fitting_JR_example.py
@@ -108,7 +108,7 @@
 
 # %%
 # call the model we want to fit
-model = RNNJANSEN(node_size, TPperWindow, step_size, output_size, tr, sc, lm, dist, True, False, params)
+model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)
 
 # %%
 # create objective function
diff --git a/examples/eg005r__gpu_support.py b/examples/eg005r__gpu_support.py
index e173cac2..6dad8466 100644
--- a/examples/eg005r__gpu_support.py
+++ b/examples/eg005r__gpu_support.py
@@ -16,7 +16,10 @@
 # Importage
 # ---------------------------------------------------
 #
-
+# os stuff
+import os
+import sys
+sys.path.append('..')
 # whobpyt stuff
 import whobpyt
 from whobpyt.datatypes import par, Recording
diff --git a/examples/eg006r__replicate_Momi2023.py b/examples/eg006r__replicate_Momi2023.py
index 462431f8..c869d7a0 100644
--- a/examples/eg006r__replicate_Momi2023.py
+++ b/examples/eg006r__replicate_Momi2023.py
@@ -132,7 +132,9 @@
 
 # %%
 # call the model we want to fit
-model = RNNJANSEN(node_size, batch_size, step_size, output_size, tr, sc, lm, dist, True, False, params)
+model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)
+
 
 
 # create objective function

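The example scripts now pass params first and every simulation setting by keyword, which guards against silently misordered positional arguments. A hypothetical stub with the same calling shape (build_jansen is a stand-in, not the real constructor):

    def build_jansen(params, node_size, TRs_per_window, step_size, output_size,
                     tr, sc, lm, dist, use_fit_gains=True, use_fit_lfm=False):
        # Stand-in showing the keyword-based calling convention used above
        return locals()

    cfg = build_jansen({}, node_size=2, TRs_per_window=20, step_size=0.05, output_size=2,
                       tr=1.0, sc=None, lm=None, dist=None)
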
From aa8de7a688419bb364f18e28fc850d8d0a54245f Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 18:55:55 +0000
Subject: [PATCH 6/9] eg 05 working

---
 whobpyt/models/BOLD/BOLD.py                |   8 +-
 whobpyt/models/EEG/EEG.py                  |  61 ++++++------
 whobpyt/models/RWWEI2/Multimodal_RWWEI2.py | 109 ++++++++++-----------
 whobpyt/models/RWWEI2/RWWEI2.py            |   5 +-
 4 files changed, 88 insertions(+), 95 deletions(-)

diff --git a/whobpyt/models/BOLD/BOLD.py b/whobpyt/models/BOLD/BOLD.py
index 86594fa9..de5faea2 100644
--- a/whobpyt/models/BOLD/BOLD.py
+++ b/whobpyt/models/BOLD/BOLD.py
@@ -29,10 +29,7 @@ def __init__(self, num_regions, params, useBC = False, device = torch.device('cp
         self.device = device
         
         self.num_blocks = 1
-        self.params_fitted = {}
-        self.params_fitted['modelparameter'] =[]
-        self.params_fitted['hyperparameter'] =[]
-        self.track_params = []
+        
         self.params = params
         
         self.setModelParameters()
@@ -140,5 +137,4 @@ def forward(self, init_state, step_size, sim_len, node_history):
     sim_vals["q"]    = layer_hist[:, :, 3, :].permute((1,0,2))
     sim_vals["bold"] = layer_hist[:, :, 4, :].permute((1,0,2)) # Post Permute: Nodes x Time x Batch
     
-    return sim_vals, hE
-
+    return sim_vals, hE
\ No newline at end of file
diff --git a/whobpyt/models/EEG/EEG.py b/whobpyt/models/EEG/EEG.py
index 78c994d6..424a0df6 100644
--- a/whobpyt/models/EEG/EEG.py
+++ b/whobpyt/models/EEG/EEG.py
@@ -21,11 +21,7 @@ def __init__(self, num_regions, params, num_channels, device = torch.device('cpu
         
         self.num_regions = num_regions
         self.num_channels = num_channels
-        self.params_fitted = {}
-        self.params_fitted['modelparameter'] =[]
-        self.params_fitted['hyperparameter'] =[]
         
-        self.track_params = []
         self.device = device
         
         self.num_blocks = 1
@@ -38,39 +34,44 @@ def info(self):
         return {"state_names": ["None"], "output_name": "eeg"}
             
     def setModelParameters(self):
-        self.LF = self.params.LF.to(self.device)
+        return setModelParameters(self)
         
     def createIC(self, ver):
         pass
     
     def forward(self, step_size, sim_len, node_history, device = torch.device('cpu')):
-        hE = torch.tensor(1.0).to(self.device) #Dummy variable
+        return forward(self, step_size, sim_len, node_history)   
+
+def setModelParameters(self):
+    #############################################
+    ## EEG Lead Field
+    #############################################
     
-        # Runs the EEG Model
-        #
-        # INPUT
-        #  step_size: Float - The step size in msec which must match node_history step size.
-        #  sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. 
-        #  node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM
-        #  device: torch.device - Whether to run on GPU or CPU - default is CPU and GPU has not been tested for Network_NMM code
-        #
-        # OUTPUT
-        #  layer_history: Tensor - [time_steps, regions, num_blocks]
-        #
-        
-        layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device)
-        print(layer_hist.shape)
-        num_steps = int((sim_len/step_size)/self.num_blocks)
-        for i in range(num_steps):
-            layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF
-        
-        sim_vals = {}
-        sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch
+    self.LF = self.params.LF.to(self.device)
     
-        return sim_vals, hE   
-
-
+def forward(self, step_size, sim_len, node_history):
     
+    hE = torch.tensor(1.0).to(self.device) #Dummy variable
+    
+    # Runs the EEG Model
+    #
+    # INPUT
+    #  step_size: Float - The step size in msec which must match node_history step size.
+    #  sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. 
+    #  node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM
+    #  (the compute device now comes from self.device set in __init__, rather than a parameter)
+    #
+    # OUTPUT
+    #  layer_history: Tensor - [time_steps, regions, num_blocks]
+    #
+    
+    layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device)
 
+    num_steps = int((sim_len/step_size)/self.num_blocks)
+    for i in range(num_steps):
+        layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF
     
-    
\ No newline at end of file
+    sim_vals = {}
+    sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch
+
+    return sim_vals, hE
\ No newline at end of file
diff --git a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
index 87c10a76..b2b4360c 100644
--- a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
+++ b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
@@ -6,26 +6,25 @@
 from whobpyt.models.BOLD import BOLD_Layer, BOLD_Params
 from whobpyt.models.EEG import EEG_Layer, EEG_Params
 
-class RWWEI2_EEG_BOLD(torch.nn.Module):
+class RWWEI2_EEG_BOLD(RWWEI2):
 
-    model_name = "RWWEI2_EEG_BOLD"
+    
     
     def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD, Con_Mtx, dist_mtx, step_size, sim_len, device = torch.device('cpu')):
-        super(RWWEI2_EEG_BOLD, self).__init__() # To inherit parameters attribute
+
+        
+        super(RWWEI2_EEG_BOLD, self).__init__(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, useBC = False, device = device)
+        self.model_name = "RWWEI2_EEG_BOLD"
         self.eeg = EEG_Layer(num_regions, paramsEEG, num_channels, device = device)
         self.bold = BOLD_Layer(num_regions, paramsBOLD, device = device)
-        self.NMM = RWWEI2(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, sim_len=sim_len, useBC = False, device = device)
-        self.params = paramsNode
         self.node_size = num_regions
         self.step_size = step_size
         self.sim_len = sim_len
-        self.output_size = num_regions
+        
         self.device = device
         
         self.batch_size = 1
-        self.use_fit_gains = self.NMM.use_fit_gains  # flag for fitting gains
-        self.use_fit_lfm = self.NMM.use_fit_lfm
-        self.useBC =self.NMM.useBC
+        
         self.tr = sim_len
         self.steps_per_TR = 1
         self.TRs_per_window = 1
@@ -33,66 +32,64 @@ def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD,
         self.state_names = ['E', 'I', 'x', 'f', 'v', 'q']
         self.output_names = ["bold", "eeg"]
         self.track_params = []  #Is populated during setModelParameters()
-        self.params_fitted={}
-        self.params_fitted['modelparameter'] =[]
-        self.params_fitted['hyperparameter'] =[]
-        
-        self.NMM.setModelParameters()
         
-        self.eeg.setModelParameters()
-        self.bold.setModelParameters()
-        
-        self.params_fitted['modelparameter'] = self.NMM.params_fitted['modelparameter'] + self.eeg.params_fitted['modelparameter'] +self.bold.params_fitted['modelparameter']
-        self.params_fitted['hyperparameter'] = self.NMM.params_fitted['hyperparameter'] + self.eeg.params_fitted['hyperparameter'] +self.bold.params_fitted['hyperparameter']
-        self.track_params = self.NMM.track_params + self.eeg.track_params +self.bold.track_params
+        self.setModelParameters_here()
+
     def info(self):
         return {"state_names": ['E', 'I', 'x', 'f', 'v', 'q'], "output_names": ["bold", "eeg"]} #TODO: Update to take multiple output names
             
-    
+    def setModelParameters_here(self):
+        return setModelParameters_here(self)
         
     def createIC(self, ver):
-        self.NMM.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) \
-        + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size)
-        self.NMM.next_start_state = self.NMM.next_start_state.to(self.device)
-    
-        return self.NMM.next_start_state
-        
-    def createDelayIC(self, ver):
-        # Creates a time series of state variables to represent their past values as needed when delays are used. 
-        
-        return torch.tensor(1.0) #Dummy variable if delays are not used
-    
+        return createIC(self, ver)
         
     def setBlocks(self, num_blocks):
         self.num_blocks = num_blocks
         self.eeg.num_blocks = num_blocks
         self.bold.num_blocks = num_blocks
     
-    def forward(self, external, hx, hE, setNoise=None):
-        
-        NMM_vals, hE = self.NMM.forward(external, self.NMM.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future
-        print(NMM_vals['E'].shape)
-        EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
-        BOLD_vals, hE = self.bold.forward(self.NMM.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
-        
-        self.NMM.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach()
-        
-        sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals}
-        sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable
-        
-        # Reshape if Blocking is being Used
-        if self.NMM.num_blocks > 1:
-            for simKey in set(self.state_names + self.output_names):
-                #print(sim_vals[simKey].shape) # Nodes x Time x Blocks
-                #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks
-                sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0]
-        else:
-            for simKey in set(self.state_names + self.output_names):
-                sim_vals[simKey] = sim_vals[simKey][:,:,0]
-                
-        return sim_vals, hE
+    def forward(self, external, hx, hE, setNoise = None):
+        return forward(self, external, hx, hE, setNoise)
+
+def setModelParameters_here(self):
+    self.setModelParameters() # Currently this is the only one with parameters being fitted
+    self.eeg.setModelParameters()
+    self.bold.setModelParameters()    
+
+def createIC(self, ver):
+    #super(RWWEI2_EEG_BOLD, self).createIC()
+    #self.eeg.createIC()
+    #self.bold.createIC()
     
+    self.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size)
+    self.next_start_state = self.next_start_state.to(self.device)
     
+    return self.next_start_state
+    
+
+def forward(self, external, hx, hE, setNoise):
+    
+    NMM_vals, hE = super(RWWEI2_EEG_BOLD, self).forward(external, self.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future
+    EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
+    BOLD_vals, hE = self.bold.forward(self.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2)))
+    
+    self.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach()
+    
+    sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals}
+    sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable
+    
+    # Reshape if Blocking is being Used
+    if self.num_blocks > 1:
+        for simKey in set(self.state_names + self.output_names):
+            #print(sim_vals[simKey].shape) # Nodes x Time x Blocks
+            #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks
+            sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0]
+    else:
+        for simKey in set(self.state_names + self.output_names):
+            sim_vals[simKey] = sim_vals[simKey][:,:,0]
+            
+    return sim_vals, hE
     
     
 def blockTS(data, blocks, numNodes, numSV):
@@ -125,4 +122,4 @@ def serializeTS(data, numNodes, numSV):
     data_r = torch.reshape(data_p, (numSV, numNodes, newTimeDim))
     data_p2 = data_r.permute((2,1,0))
     
-    return data_p2
+    return data_p2
\ No newline at end of file
diff --git a/whobpyt/models/RWWEI2/RWWEI2.py b/whobpyt/models/RWWEI2/RWWEI2.py
index c677e7a7..aa3c3d94 100644
--- a/whobpyt/models/RWWEI2/RWWEI2.py
+++ b/whobpyt/models/RWWEI2/RWWEI2.py
@@ -120,13 +120,12 @@ def createIC(self, ver):
         self.next_start_state = self.next_start_state.to(self.device)
         
         return self.next_start_state
-        
-    
-    
+
     def createDelayIC(self, ver):
         # Creates a time series of state variables to represent their past values as needed when delays are used. 
         
         return torch.tensor(1.0) #Dummy variable if delays are not used
+
     def setBlocks(self, num_blocks):
         self.num_blocks = num_blocks
     

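Two patterns recur in this patch: class methods become thin wrappers around module-level functions that take self explicitly, and the multimodal class inherits from the NMM instead of holding one as an attribute. A minimal, runnable sketch of the combined shape (all names here are hypothetical):

    import torch

    class NMM(torch.nn.Module):
        def forward(self, x):
            return x + 1

    class Multimodal(NMM):                      # was: self.NMM = NMM(...) via composition
        def forward(self, x):
            return forward(self, x)             # delegate to the shared function below

    def forward(self, x):                       # module-level body, reusable across classes
        y = super(Multimodal, self).forward(x)  # run the inherited NMM step, as in Multimodal_RWWEI2
        return y * 2

    m = Multimodal()
    print(m.forward(torch.tensor(1.0)))         # tensor(4.)
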
From 0f81da9c0736a76c7dc699406d8bf590ab04e8ad Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 18:57:27 +0000
Subject: [PATCH 7/9] eg 06 working

---
 examples/eg006r__replicate_Momi2023.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/eg006r__replicate_Momi2023.py b/examples/eg006r__replicate_Momi2023.py
index c869d7a0..f5a950f2 100644
--- a/examples/eg006r__replicate_Momi2023.py
+++ b/examples/eg006r__replicate_Momi2023.py
@@ -104,7 +104,7 @@
 output_size = eeg_data.shape[0]
 batch_size = 20
 step_size = 0.0001
-num_epoches = 120
+num_epoches = 20
 tr = 0.001
 state_size = 6
 base_batch_num = 20
@@ -133,7 +133,7 @@
 # %%
 # call the model we want to fit
-model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)
+model = RNNJANSEN(params, node_size=node_size, TRs_per_window=batch_size, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)
 
 
 

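The window arithmetic behind the settings above, using the values from the script (tr = 0.001, step_size = 0.0001, batch_size = 20):

    tr, step_size, batch_size = 0.001, 0.0001, 20
    steps_per_TR = int(tr / step_size)     # 10 integration steps per TR
    print(steps_per_TR * batch_size)       # 200 model steps simulated per window
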
From fc3aabc42dd8862e0693a4b13230a0910a34d103 Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 25 Jan 2024 18:58:32 +0000
Subject: [PATCH 8/9] eg 05 working

---
 whobpyt/run/customfitting.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/whobpyt/run/customfitting.py b/whobpyt/run/customfitting.py
index d9b40fd9..f41cfab8 100644
--- a/whobpyt/run/customfitting.py
+++ b/whobpyt/run/customfitting.py
@@ -91,7 +91,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                     # initial history of E
                     delayHist = self.model.createDelayIC(ver = 0) #TODO: Delays are currently not implemented in various places
                 else:
-                    firstIC = self.model.NMM.next_start_state
+                    firstIC = self.model.next_start_state
                     delayHist = torch.tensor(1.0, device = self.device) # TODO: Delays are currently not implemented in various places
 
                 # initialize the external inputs
@@ -100,7 +100,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 num_blocks = int(self.model.sim_len/block_len)
             
                 ## Noise for the epoch (which has 1 batch)
-                [serialNoise, blockNoise] = self.model.NMM.genNoise(block_len)
+                [serialNoise, blockNoise] = self.model.genNoise(block_len)
 
                 with torch.no_grad():
                     sim_vals, delayHist = self.model.forward(external, firstIC, delayHist, serialNoise)
@@ -127,9 +127,9 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 nextICs = ICs[:,:,[-1]] #Brackets are to keep the dimension
 
                 self.model.setBlocks(num_blocks)
-                self.model.NMM.next_start_state = newICs
+                self.model.next_start_state = newICs
                 sim_vals, delayHist = self.model.forward(external, newICs, delayHist, blockNoise)
-                self.model.NMM.next_start_state = nextICs #TODO: Update to deal with delays as well
+                self.model.next_start_state = nextICs #TODO: Update to deal with delays as well
                 
                 print("Blocked Finished")
                 
@@ -173,5 +173,4 @@ def simulate(self, stim, numTP):
         '''
         Not implemented yet.
         '''
-        pass
-    
\ No newline at end of file
+        pass
\ No newline at end of file

From 9b0d1f00334572ad6bd3e13b5d9e34ca5ba82d9d Mon Sep 17 00:00:00 2001
From: John Wang <zhengwangdataanalyst@gmail.com>
Date: Thu, 1 Feb 2024 20:39:14 +0000
Subject: [PATCH 9/9] add HGF model

---
 whobpyt/models/HGF/ParamsHGF.py |  27 ++++++++
 whobpyt/models/HGF/__init__.py  |   2 +
 whobpyt/models/HGF/hgf.py       | 113 ++++++++++++++++++++++++++++++++
 3 files changed, 142 insertions(+)
 create mode 100644 whobpyt/models/HGF/ParamsHGF.py
 create mode 100644 whobpyt/models/HGF/__init__.py
 create mode 100644 whobpyt/models/HGF/hgf.py

diff --git a/whobpyt/models/HGF/ParamsHGF.py b/whobpyt/models/HGF/ParamsHGF.py
new file mode 100644
index 00000000..260f6f71
--- /dev/null
+++ b/whobpyt/models/HGF/ParamsHGF.py
@@ -0,0 +1,27 @@
+import torch
+from whobpyt.datatypes import AbstractParams, par
+
+class ParamsHGF(AbstractParams):
+    def __init__(self, **kwargs):
+
+        super(ParamsHGF, self).__init__(**kwargs)
+
+        params = {
+
+            "omega_3": par(0.03),  # standard deviation of the Gaussian noise
+            "omega_2": par(0.02),  # standard deviation of the Gaussian noise
+
+            "kappa": par(1.),  # scale of the external input
+            "x2mean" : par(1),
+            "deca2" : par(1),
+            "deca3" : par(1),
+            "g_x2_x3" : par(1),
+            "g_x3_x2" : par(1),
+            "c" : par(1)
+            }
+
+        for var in params:
+            if var not in self.params:
+                self.params[var] = params[var]
+
+        self.setParamsAsattr()
\ No newline at end of file
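
Since the loop above only fills in defaults that are absent, callers can override a subset of parameters at construction. A hypothetical usage, assuming AbstractParams stores its keyword arguments into self.params:

    from whobpyt.datatypes import par
    from whobpyt.models.HGF import ParamsHGF

    p = ParamsHGF(omega_2=par(0.05))   # custom x2 noise scale; the other values keep the defaults above
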
diff --git a/whobpyt/models/HGF/__init__.py b/whobpyt/models/HGF/__init__.py
new file mode 100644
index 00000000..c3c77eb0
--- /dev/null
+++ b/whobpyt/models/HGF/__init__.py
@@ -0,0 +1,2 @@
+from .hgf import HGF
+from .ParamsHGF import ParamsHGF
\ No newline at end of file
diff --git a/whobpyt/models/HGF/hgf.py b/whobpyt/models/HGF/hgf.py
new file mode 100644
index 00000000..376da4f7
--- /dev/null
+++ b/whobpyt/models/HGF/hgf.py
@@ -0,0 +1,113 @@
+import torch
+import numpy as np
+from torch.nn.parameter import Parameter
+from whobpyt.datatypes import AbstractNMM, AbstractParams, par
+from whobpyt.models.HGF import ParamsHGF
+
+class HGF(AbstractNMM):
+    def __init__(self, paramsHGF, TRperWindow=20, node_size=1, tr=1, step_size=0.05, use_fit_gains=False, output_size=1) -> None:
+        super(HGF, self).__init__(paramsHGF)
+
+        self.model_name = 'HGF'
+        self.output_names = ['x1']
+        self.state_names = np.array(['x2', 'x3'])
+        self.pop_size = 1  # 1 population per node
+        self.state_size = 2  # 2 states in each population
+        self.tr = tr  # sampling interval (TR), on the order of seconds
+        self.step_size = torch.tensor(step_size, dtype=torch.float32)  # integration step size
+        self.steps_per_TR = int(tr / step_size)
+        self.TRs_per_window = TRperWindow  # size of the batch used at each step
+        self.node_size = node_size  # num of ROI
+        self.output_size = output_size  # number of output channels
+        self.use_fit_gains = use_fit_gains
+        #self.params = paramsHGF
+        self.setModelParameters()
+        self.setHyperParameters()
+
+
+    def forward(self, external=None, X=None, hE=None):
+        omega3 = self.params.omega_3.value()
+        omega2 = self.params.omega_2.value()
+        kappa = self.params.kappa.value()
+        x2mean = self.params.x2mean.value()
+        deca2 = self.params.deca2.value()
+        deca3 = self.params.deca3.value()
+        c = self.params.c.value()
+        g_x2_x3 = self.params.g_x2_x3.value()
+        g_x3_x2 = self.params.g_x3_x2.value()
+        dt = self.step_size
+        next_state = {}
+
+        state_windows = []
+        x1_windows = []
+        x2=X[:,0:1]
+        x3=X[:,1:2]
+        for TR_i in range(self.TRs_per_window):
+            x2_tmp =[]
+            x3_tmp =[]
+            # Since tr is on the order of seconds, a small step size (e.g. 0.05) is needed to integrate the model states.
+            for step_i in range(self.steps_per_TR):
+
+
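+                # Euler-Maruyama step for the coupled SDEs:
+                #   dx3 = -(deca3*x3 - g_x2_x3*x2) dt + omega3 dW
+                #   dx2 = -(deca2*x2 - g_x3_x2*x3) dt + omega2*exp(kappa*x3) dW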
+                x3_new = x3 - dt*(deca3*x3 - g_x2_x3*x2) + torch.sqrt(dt)*torch.randn(self.node_size, 1)*omega3
+                x2_new = x2 - dt*(deca2*x2 - g_x3_x2*x3) + torch.sqrt(dt)*torch.randn(self.node_size, 1)*omega2*torch.exp(kappa*x3)
+
+                x2_tmp.append(x2_new)
+                x3_tmp.append(x3_new)
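+                # soft-bound both states to (-10, 10) with a scaled tanh to keep the integration stable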
+                x2 = 10*torch.tanh(x2_new/10)
+                x3 = 10*torch.tanh(x3_new/10)
+            #x2 = sum(x2_tmp)/self.steps_per_TR#10*torch.tanh(x2_new/10)
+            #x3 = sum(x3_tmp)/self.steps_per_TR#10*torch.tanh(x3_new/10)
+            state_windows.append(torch.cat([x2, x3], dim =1)[:,:,np.newaxis])
+            x1_windows.append(x2-x2mean)
+            #x1_windows.append(1/(1+torch.exp(-c*(x2-k))))
+        next_state['x1'] = torch.cat(x1_windows, dim =1)
+        next_state['states'] = torch.cat(state_windows, dim =2)
+        next_state['current_state'] = torch.cat([x2, x3], dim =1)
+        return next_state, hE
+
+
+    def createIC(self, ver):
+        """
+
+            A function to return an initial state tensor for the model.
+
+        Parameters
+        ----------
+
+        ver: int
+            Ignored Parameter
+
+
+        Returns
+        ----------
+
+        Tensor
+            Random Initial Conditions for RWW & BOLD
+
+
+        """
+
+        # initial state (the 0 * factor disables the random jitter, so the ICs are deterministically [0.0, 0.5])
+        return torch.tensor(0 * np.random.uniform(-0.02, 0.02, (self.node_size, self.state_size)) + np.array([0.0, 0.5]), dtype=torch.float32)
+
+
+    def setHyperParameters(self):
+        """
+        Sets the parameters of the model.
+        """
+
+
+
+        # Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True
+        self.mu2 = Parameter(torch.tensor(0.0*np.ones((self.node_size, 1)), # mean for x2
+                                                dtype=torch.float32))
+        self.mu3 = Parameter(torch.tensor(1.0*np.ones((self.node_size, 1)), # mean for x3
+                                                dtype=torch.float32))
+        self.var_inv_2 = Parameter(torch.tensor(.1*np.ones((self.node_size, 1)), # inverse variance for x2
+                                                dtype=torch.float32))
+        self.var_inv_3 = Parameter(torch.tensor(.1*np.ones((self.node_size, 1)), # inverse variance for x3
+                                                dtype=torch.float32))
+        self.params_fitted['hyperparameter'].append(self.mu2)
+        self.params_fitted['hyperparameter'].append(self.mu3)
+        self.params_fitted['hyperparameter'].append(self.var_inv_2)
+        self.params_fitted['hyperparameter'].append(self.var_inv_3)
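
To close, a hypothetical end-to-end use of the new model (assumes the package layout introduced in this patch; shapes follow the forward() implementation above):

    import torch
    from whobpyt.models.HGF import HGF, ParamsHGF

    params = ParamsHGF()
    model = HGF(params, TRperWindow=20, node_size=1, tr=1, step_size=0.05)

    X = model.createIC(ver=0)               # (node_size, 2): fixed [x2, x3] start values
    out, hE = model.forward(X=X, hE=None)   # simulate one window
    print(out['x1'].shape)                  # (node_size, TRs_per_window)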