diff --git a/examples/eg002r__multimodal_simulation.py b/examples/eg002r__multimodal_simulation.py index 89523b1f..f2ceefb2 100644 --- a/examples/eg002r__multimodal_simulation.py +++ b/examples/eg002r__multimodal_simulation.py @@ -1,17 +1,14 @@ # -*- coding: utf-8 -*- + r""" ================================= Fitting S_E Mean to 0.164 using default RWW Parameters ================================= What is being modeled: - -- Created a Sphere'd Cube (chosen points on cube projected onto radius = 1 sphere), so that regions were more evently distributed. All corners of cube chosen as regions, thus there are 8 regions. - -- EEG channels located on the center of each face of the cube. Thus there are 6 EEG channels. - -- Added some randomness to initial values - to decorrelate the signals a bit. Looking for FC matrix to look similar to SC matrix. - +Created a Sphered Cube chosen points on cube projected onto radius = 1 sphere, so that regions were more evently distributed. All corners of cube chosen as regions, thus there are 8 regions. +EEG channels located on the center of each face of the cube. Thus there are 6 EEG channels. +Added some randomness to initial values - to decorrelate the signals a bit. Looking for FC matrix to look similar to SC matrix. """ # sphinx_gallery_thumbnail_number = 1 @@ -21,6 +18,11 @@ # --------------------------------------------------- # +# os stuff +import os +import sys +sys.path.append('..') + # whobpyt stuff import whobpyt from whobpyt.datatypes import par, Recording @@ -193,7 +195,7 @@ def loss(self, simData, empData = None, returnLossComponents = False): # Model Simulation # --------------------------------------------------- # -F.simulate(u = 0, numTP = randTS1.length) +F.evaluate(u=0, empRec=randTS1, TPperWindow=TPperWindow , base_window_num=2, transient_num = 10) # %% @@ -207,7 +209,6 @@ def loss(self, simData, empData = None, returnLossComponents = False): plt.xlabel('Time Steps (multiply by step_size to get msec), step_size = ' + str(step_size)) plt.legend() - # %% # Plots of EEG PSD # @@ -222,73 +223,4 @@ def loss(self, simData, empData = None, returnLossComponents = False): plt.plot(sdAxis_dS, sdValues_dS_scaled.detach()[:,n]) plt.xlabel('Hz') plt.ylabel('PSD') -plt.title("Simulated EEG PSD: After Training") - - -# %% -# Plots of BOLD FC -# - -sim_FC = np.corrcoef(F.lastRec['bold'].npTS()[:,skip_trans:]) - -plt.figure(figsize = (8, 8)) -plt.title("Simulated BOLD FC: After Training") -mask = np.eye(num_regions) -sns.heatmap(sim_FC, mask = mask, center=0, cmap='RdBu_r', vmin=-1.0, vmax = 1.0) - - -# %% -# CNMM Validation Model -# --------------------------------------------------- -# -# The Multi-Modal Model - -model.eeg.params.LF = model.eeg.params.LF.cpu() - -val_sim_len = 20*1000 # Simulation length in msecs -model_validate = RWWEI2_EEG_BOLD_np(num_regions, num_channels, model.params, model.eeg.params, model.bold.params, Con_Mtx.detach().cpu().numpy(), dist_mtx.detach().cpu().numpy(), step_size, val_sim_len) - -sim_vals, hE = model_validate.forward(external = 0, hx = model_validate.createIC(ver = 0), hE = 0) - - -# %% -# Plots of S_E and S_I Validation -# - -plt.figure(figsize = (16, 8)) -plt.title("S_E and S_I") -for n in range(num_regions): - plt.plot(sim_vals['E'], label = "S_E Node = " + str(n)) - plt.plot(sim_vals['I'], label = "S_I Node = " + str(n)) - -plt.xlabel('Time Steps (multiply by step_size to get msec), step_size = ' + str(step_size)) -plt.legend() - - -# %% -# Plots of EEG PSD Validation -# - -sampleFreqHz = 1000*(1/step_size) -sdAxis, 
sdValues = CostsPSD.calcPSD(torch.tensor(sim_vals['eeg']), sampleFreqHz, minFreq = 2, maxFreq = 40) -sdAxis_dS, sdValues_dS = CostsPSD.downSmoothPSD(sdAxis, sdValues, 32) -sdAxis_dS, sdValues_dS_scaled = CostsPSD.scalePSD(sdAxis_dS, sdValues_dS) - -plt.figure() -for n in range(num_channels): - plt.plot(sdAxis_dS, sdValues_dS_scaled.detach()[:,n]) -plt.xlabel('Hz') -plt.ylabel('PSD') -plt.title("Simulated EEG PSD: After Training") - - -# %% -# Plots of BOLD FC Validation -# - -sim_FC = np.corrcoef((sim_vals['bold'].T)[:,skip_trans:]) - -plt.figure(figsize = (8, 8)) -plt.title("Simulated BOLD FC: After Training") -mask = np.eye(num_regions) -sns.heatmap(sim_FC, mask = mask, center=0, cmap='RdBu_r', vmin=-1.0, vmax = 1.0) \ No newline at end of file +plt.title("Simulated EEG PSD: After Training") \ No newline at end of file diff --git a/examples/eg003r__fitting_rww_example.py b/examples/eg003r__fitting_rww_example.py index 67bdad7d..0ae4a5fa 100644 --- a/examples/eg003r__fitting_rww_example.py +++ b/examples/eg003r__fitting_rww_example.py @@ -110,7 +110,7 @@ # %% # call model want to fit -model = RNNRWW(node_size, TPperWindow, step_size, repeat_size, tr, sc, True, params) +model = RNNRWW(params, node_size =node_size, TRs_per_window =TPperWindow, step_size=step_size, tr=tr, sc=sc, use_fit_gains=True) # %% # create objective function diff --git a/examples/eg004r__fitting_JR_example.py b/examples/eg004r__fitting_JR_example.py index d8a43f53..2db38077 100644 --- a/examples/eg004r__fitting_JR_example.py +++ b/examples/eg004r__fitting_JR_example.py @@ -108,7 +108,7 @@ # %% # call model want to fit -model = RNNJANSEN(node_size, TPperWindow, step_size, output_size, tr, sc, lm, dist, True, False, params) +model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm = False) # %% # create objective function diff --git a/examples/eg005r__gpu_support.py b/examples/eg005r__gpu_support.py index e173cac2..6794133f 100644 --- a/examples/eg005r__gpu_support.py +++ b/examples/eg005r__gpu_support.py @@ -16,6 +16,10 @@ # Importage # --------------------------------------------------- # +# os stuff +import os +import sys +sys.path.append('..') # whobpyt stuff import whobpyt @@ -112,10 +116,6 @@ end_time = time.time() print(str((end_time - start_time)/60) + " minutes") -# %% -# Plots of loss over Training -plt.plot(np.arange(1,len(F.trainingStats.loss)+1), F.trainingStats.loss) -plt.title("Total Loss over Training Epochs") # %% @@ -124,89 +124,9 @@ # # Simulation Length -step_size = 0.1 # Step Size in msecs -sim_len = 5000 # Simulation length in msecs -model = RWWEI2_EEG_BOLD(num_regions, num_channels, model.params, paramsEEG, paramsBOLD, Con_Mtx, dist_mtx, step_size, sim_len, device) -targetValue = torch.tensor([0.164]).to(device) -objFun = CostsmmRWWEI2(num_regions, simKey = "E", targetValue = targetValue, device = device) - -# Create a Fitting Object -F = Fitting_FNGFPG(model, objFun, device) - -# Training Data -empSubject = {} -empSubject['EEG_FC'] = channelFC -empSubject['BOLD_FC'] = sourceFC -num_epochs = 3 -num_recordings = 1 -block_len = 100 # in msec - -# model training -start_time = time.time() -F.train(stim = 0, empDatas = [empSubject], num_epochs = num_epochs, block_len = block_len, learningrate = 0.05, resetIC = False) -end_time = time.time() -print(str((end_time - start_time)/60) + " minutes") # %% # Plots of loss over Training plt.plot(np.arange(1,len(F.trainingStats.loss)+1), 
F.trainingStats.loss) plt.title("Total Loss over Training Epochs") - - -# %% -# CNMM Verification Model -# --------------------------------------------------- -# -# The Multi-Modal Model - -model.eeg.params.LF = model.eeg.params.LF.cpu() - -val_sim_len = 20*1000 # Simulation length in msecs -model_validate = RWWEI2_EEG_BOLD_np(num_regions, num_channels, model.params, model.eeg.params, model.bold.params, Con_Mtx.detach().cpu().numpy(), dist_mtx.detach().cpu().numpy(), step_size, val_sim_len) - -sim_vals, hE = model_validate.forward(external = 0, hx = model_validate.createIC(ver = 0), hE = 0) - - -# %% -# Plots of S_E and S_I Verification -# - -plt.figure(figsize = (16, 8)) -plt.title("S_E and S_I") -for n in range(num_regions): - plt.plot(sim_vals['E'][0:10000, n], label = "S_E Node = " + str(n)) - plt.plot(sim_vals['I'][0:10000, n], label = "S_I Node = " + str(n)) - -plt.xlabel('Time Steps (multiply by step_size to get msec), step_size = ' + str(step_size)) -plt.legend() - - -# %% -# Plots of EEG PSD Verification -# - -sampleFreqHz = 1000*(1/step_size) -sdAxis, sdValues = CostsPSD.calcPSD(torch.tensor(sim_vals['eeg']), sampleFreqHz, minFreq = 2, maxFreq = 40) -sdAxis_dS, sdValues_dS = CostsPSD.downSmoothPSD(sdAxis, sdValues, 32) -sdAxis_dS, sdValues_dS_scaled = CostsPSD.scalePSD(sdAxis_dS, sdValues_dS) - -plt.figure() -for n in range(num_channels): - plt.plot(sdAxis_dS, sdValues_dS_scaled.detach()[:,n]) -plt.xlabel('Hz') -plt.ylabel('PSD') -plt.title("Simulated EEG PSD: After Training") - - -# %% -# Plots of BOLD FC Verification -# - -skip_trans = int(500/step_size) -sim_FC = np.corrcoef((sim_vals['bold'].T)[:,skip_trans:]) - -plt.figure(figsize = (8, 8)) -plt.title("Simulated BOLD FC: After Training") -mask = np.eye(num_regions) -sns.heatmap(sim_FC, mask = mask, center=0, cmap='RdBu_r', vmin=-1.0, vmax = 1.0) \ No newline at end of file diff --git a/whobpyt/datatypes/AbstractNMM.py b/whobpyt/datatypes/AbstractNMM.py index 9793ad5a..c6beafac 100644 --- a/whobpyt/datatypes/AbstractNMM.py +++ b/whobpyt/datatypes/AbstractNMM.py @@ -1,11 +1,13 @@ import torch - +from whobpyt.datatypes.parameter import par +from torch.nn.parameter import Parameter class AbstractNMM(torch.nn.Module): # This is the abstract class for the models (typically neural mass models) that are being trained. # The neuroimaging modality might be integrated into the model as well. - def __init__(self): + def __init__(self, params): super(AbstractNMM, self).__init__() # May not want to enherit from torch.nn.Module in the future + self.params = params self.state_names = ["None"] # The names of the state variables of the model self.output_names = ["None"] # The variable to be used as output from the NMM, for purposes such as the input to an objective function @@ -25,7 +27,34 @@ def setModelParameters(self): # Setting the parameters that will be optimized as either model parameters or 2ndLevel/hyper # parameters (for various optional features). # This should be called in the __init__() function implementation for convenience if possible. 
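# An illustrative sketch (not part of this patch) of how a subclass is expected to use the
# registration implemented below: it hands its parameter object to the base class, calls
# setModelParameters(), and the tensors collected in params_fitted['modelparameter'] can be
# passed to a torch optimizer. Class and variable names here are placeholders.
import torch
from whobpyt.datatypes import AbstractNMM

class MinimalNMM(AbstractNMM):
    def __init__(self, params):
        super(MinimalNMM, self).__init__(params)  # stores the params object on the model
        self.setModelParameters()                 # populates self.params_fitted

# model = MinimalNMM(my_params)  # my_params: an object whose attributes are `par` instances
# optimizer = torch.optim.Adam(model.params_fitted['modelparameter'], lr=0.05)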
- pass + # If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information) + param_reg = [] + param_hyper = [] + + var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)] + for var_name in var_names: + var = getattr(self.params, var_name) + if (var.fit_hyper): + if var_name == 'lm': + size = var.prior_var.shape + var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance + param_hyper.append(var.prior_mean) + param_hyper.append(var.prior_var) + elif (var != 'std_in'): + var.randSet() #TODO: This should be done before giving params to model class + param_hyper.append(var.prior_mean) + param_hyper.append(var.prior_var) + + if (var.fit_par): + param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first + + if (var.fit_par | var.fit_hyper): + self.track_params.append(var_name) #NMM Parameters + + if var_name == 'lm': + setattr(self, var_name, var.val) + + self.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper} def createIC(self, ver): # Create the initial conditions for the model state variables. diff --git a/whobpyt/models/BOLD/BOLD.py b/whobpyt/models/BOLD/BOLD.py index 4ec143da..86594fa9 100644 --- a/whobpyt/models/BOLD/BOLD.py +++ b/whobpyt/models/BOLD/BOLD.py @@ -1,7 +1,7 @@ import torch from whobpyt.datatypes import AbstractMode -class BOLD_Layer(AbstractMode): +class BOLD_Layer(torch.nn.Module): ''' Balloon-Windkessel Hemodynamic Response Function Forward Model @@ -29,7 +29,10 @@ def __init__(self, num_regions, params, useBC = False, device = torch.device('cp self.device = device self.num_blocks = 1 - + self.params_fitted = {} + self.params_fitted['modelparameter'] =[] + self.params_fitted['hyperparameter'] =[] + self.track_params = [] self.params = params self.setModelParameters() diff --git a/whobpyt/models/EEG/EEG.py b/whobpyt/models/EEG/EEG.py index 410b1164..78c994d6 100644 --- a/whobpyt/models/EEG/EEG.py +++ b/whobpyt/models/EEG/EEG.py @@ -1,7 +1,7 @@ import torch from whobpyt.datatypes import AbstractMode -class EEG_Layer(AbstractMode): +class EEG_Layer(torch.nn.Module): ''' Lead Field Matrix multiplication which converts Source Space EEG to Channel Space EEG @@ -21,7 +21,11 @@ def __init__(self, num_regions, params, num_channels, device = torch.device('cpu self.num_regions = num_regions self.num_channels = num_channels + self.params_fitted = {} + self.params_fitted['modelparameter'] =[] + self.params_fitted['hyperparameter'] =[] + self.track_params = [] self.device = device self.num_blocks = 1 @@ -34,44 +38,39 @@ def info(self): return {"state_names": ["None"], "output_name": "eeg"} def setModelParameters(self): - return setModelParameters(self) + self.LF = self.params.LF.to(self.device) def createIC(self, ver): pass def forward(self, step_size, sim_len, node_history, device = torch.device('cpu')): - return forward(self, step_size, sim_len, node_history) - -def setModelParameters(self): - ############################################# - ## EEG Lead Field - ############################################# - - self.LF = self.params.LF.to(self.device) - -def forward(self, step_size, sim_len, node_history): + hE = torch.tensor(1.0).to(self.device) #Dummy variable - hE = torch.tensor(1.0).to(self.device) #Dummy variable - - # Runs the EEG Model - # - # INPUT - # step_size: Float - The step size in msec which must match node_history step 
size. - # sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. - # node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM - # device: torch.device - Whether to run on GPU or CPU - default is CPU and GPU has not been tested for Network_NMM code - # - # OUTPUT - # layer_history: Tensor - [time_steps, regions, num_blocks] - # + # Runs the EEG Model + # + # INPUT + # step_size: Float - The step size in msec which must match node_history step size. + # sim_len: Int - The amount of EEG to simulate in msec, and should match time simulated in node_history. + # node_history: Tensor - [time_points, regions, num_blocks] # This would be input coming from NMM + # device: torch.device - Whether to run on GPU or CPU - default is CPU and GPU has not been tested for Network_NMM code + # + # OUTPUT + # layer_history: Tensor - [time_steps, regions, num_blocks] + # + + layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device) + print(layer_hist.shape) + num_steps = int((sim_len/step_size)/self.num_blocks) + for i in range(num_steps): + layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF + + sim_vals = {} + sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch - layer_hist = torch.zeros(int((sim_len/step_size)/self.num_blocks), self.num_channels, self.num_blocks).to(self.device) + return sim_vals, hE + - num_steps = int((sim_len/step_size)/self.num_blocks) - for i in range(num_steps): - layer_hist[i, :, :] = torch.matmul(self.LF, node_history[i, :, :]) # TODO: Check dimensions and if correct transpose of LF - sim_vals = {} - sim_vals["eeg"] = layer_hist.permute((1,0,2)) # time x node x batch -> node x time x batch - return sim_vals, hE \ No newline at end of file + + \ No newline at end of file diff --git a/whobpyt/models/EEG/ParamsEEG.py b/whobpyt/models/EEG/ParamsEEG.py index fe80b1e1..51b29e31 100644 --- a/whobpyt/models/EEG/ParamsEEG.py +++ b/whobpyt/models/EEG/ParamsEEG.py @@ -9,4 +9,13 @@ def __init__(self, Lead_Field): ############################################# self.LF = Lead_Field # This should be [num_regions, num_channels] + + def to(self, device): + # Moves all parameters between CPU and GPU + + vars_names = [a for a in dir(self) if not a.startswith('__')] + for var_name in vars_names: + var = getattr(self, var_name) + if (type(var) == par): + var.to(device) \ No newline at end of file diff --git a/whobpyt/models/JansenRit/jansen_rit.py b/whobpyt/models/JansenRit/jansen_rit.py index 65975c96..da7f6e47 100644 --- a/whobpyt/models/JansenRit/jansen_rit.py +++ b/whobpyt/models/JansenRit/jansen_rit.py @@ -87,9 +87,10 @@ class RNNJANSEN(AbstractNMM): """ - def __init__(self, node_size: int, - TRs_per_window: int, step_size: float, output_size: int, tr: float, sc: np.ndarray, lm: np.ndarray, dist: np.ndarray, - use_fit_gains: bool, use_fit_lfm: bool, params: ParamsJR) -> None: + def __init__(self, params: ParamsJR, node_size=200, + TRs_per_window= 20, step_size=0.0001, output_size=64, tr=0.001, sc=np.ones((200,200)), \ + lm=np.ones((64,200)), dist=np.ones((200,200)), + use_fit_gains=True, use_fit_lfm=False): """ Parameters ---------- @@ -118,7 +119,7 @@ def __init__(self, node_size: int, """ method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types - super(RNNJANSEN, self).__init__() 
+ super(RNNJANSEN, self).__init__(params) self.state_names = ['E', 'Ev', 'I', 'Iv', 'P', 'Pv'] self.output_names = ["eeg"] self.track_params = [] #Is populated during setModelParameters() @@ -140,18 +141,9 @@ def __init__(self, node_size: int, self.output_size = lm.shape[0] # number of EEG channels self.setModelParameters() + self.setModelSCParameters() - def info(self): - # TODO: Make sure this method is useful - """ - Returns a dictionary with the names of the states and the output. - - Returns - ------- - Dict[str, List[str]] - """ - - return {"state_names": ['E', 'Ev', 'I', 'Iv', 'P', 'Pv'], "output_names": ["eeg"]} + def createIC(self, ver): """ @@ -195,14 +187,11 @@ def createDelayIC(self, ver): return torch.tensor(np.random.uniform(state_lb, state_ub, (self.node_size, delays_max)), dtype=torch.float32) - def setModelParameters(self): + def setModelSCParameters(self): """ Sets the parameters of the model. """ - - param_reg = [] - param_hyper = [] - + # Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True if self.use_fit_gains: self.w_bb = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the backwards gains @@ -211,45 +200,15 @@ def setModelParameters(self): dtype=torch.float32)) self.w_ll = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the lateral gains dtype=torch.float32)) - param_reg.append(self.w_ll) - param_reg.append(self.w_ff) - param_reg.append(self.w_bb) + self.params_fitted['modelparameter'].append(self.w_ll) + self.params_fitted['modelparameter'].append(self.w_ff) + self.params_fitted['modelparameter'].append(self.w_bb) else: self.w_bb = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32) self.w_ff = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32) self.w_ll = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32) - # If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information) - if self.use_fit_lfm: - self.lm = Parameter(torch.tensor(self.lm, dtype=torch.float32)) # leadfield matrix from sourced data to eeg - param_reg.append(self.lm) - else: - self.lm = torch.tensor(self.lm, dtype=torch.float32) - - var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)] - for var_name in var_names: - var = getattr(self.params, var_name) - if (var.fit_hyper): - if var_name == 'lm': - size = var.prior_var.shape - var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance - param_hyper.append(var.prior_mean) - param_hyper.append(var.prior_var) - elif (var != 'std_in'): - var.randSet() #TODO: This should be done before giving params to model class - param_hyper.append(var.prior_mean) - param_hyper.append(var.prior_var) - - if (var.fit_par): - param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first - - if (var.fit_par | var.fit_hyper): - self.track_params.append(var_name) #NMM Parameters - - if var_name == 'lm': - setattr(self, var_name, var.val) - - self.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper} + def forward(self, external, hx, hE): @@ -313,6 +272,8 @@ def forward(self, external, hx, hE): g_f = (lb * con_1 + m(self.params.g_f.value())) g_b = (lb * con_1 + m(self.params.g_b.value())) + + lm = self.params.lm.value() next_state = {} @@ -424,9 +385,9 @@ def 
forward(self, external, hx, hE): hE = torch.cat([M, hE[:, :-1]], dim=1) # update placeholders for pyramidal buffer # Capture the states at every tr in the placeholders which is then used in the cost calculation. - lm_t = (self.lm.T / torch.sqrt(self.lm ** 2).sum(1)).T + lm_t = (lm.T / torch.sqrt(lm ** 2).sum(1)).T self.lm_t = (lm_t - 1 / self.output_size * torch.matmul(torch.ones((1, self.output_size)), lm_t)) - temp = cy0 * torch.matmul(self.lm_t, M[:200, :]) - 1 * y0 + temp = cy0 * torch.matmul(self.lm_t, E-I) - 1 * y0 eeg_window.append(temp) # Update the current state. diff --git a/whobpyt/models/RWW/wong_wang.py b/whobpyt/models/RWW/wong_wang.py index bfe3ab07..93270ac4 100644 --- a/whobpyt/models/RWW/wong_wang.py +++ b/whobpyt/models/RWW/wong_wang.py @@ -25,12 +25,16 @@ class RNNRWW(AbstractNMM): Attributes --------- - state_names: list - A list of model state variable names + state_names: an array of the list + An array of list of model state variable names + pop_names: an array of list + An array of list of population names output_names: list A list of model output variable names model_name: string The name of the model itself + pop_size : int in this model just one + The number of population in the WWD model state_size : int The number of states in the WWD model tr : float @@ -43,22 +47,14 @@ class RNNRWW(AbstractNMM): The number of BOLD TRs to simulate in one forward call node_size: int The number of ROIs - sampling_size: int - This is related to an averaging of NMM values before inputing into hemodynamic equaitons. This is non-standard. + sc: float node_size x node_size array The structural connectivity matrix sc_fitted: bool The fitted structural connectivity use_fit_gains: tensor with node_size x node_size (grad on depends on fit_gains) Whether to fit the structural connectivity, will fit via connection gains: exp(gains_con)*sc - use_Laplacian: bool - Whether to use the negative laplacian of the (fitted) structural connectivity as the structural connectivity - use_Bifurcation: bool - Use a custom objective function component - use_Gaussian_EI: bool - Use a custom objective function component - use_dynamic_boundary: bool - Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size. + params: ParamsRWW A object that contains the parameters for the RWW nodes params_fitted: dictionary @@ -92,13 +88,10 @@ class RNNRWW(AbstractNMM): std for state noise and output noise """ - use_fit_lfm = False - input_size = 2 + - def __init__(self, node_size: int, - TRs_per_window: int, step_size: float, sampling_size: float, tr: float, sc: float, use_fit_gains: bool, - params: ParamsRWW, use_Bifurcation: bool = True, use_Gaussian_EI: bool = False, use_Laplacian: bool = True, - use_dynamic_boundary: bool = True) -> None: + def __init__(self, params: ParamsRWW, node_size = 68, TRs_per_window = 20, step_size = 0.05, \ + tr=1.0, sc=np.ones((68,68)), use_fit_gains= True): """ Parameters ---------- @@ -109,8 +102,6 @@ def __init__(self, node_size: int, The number of BOLD TRs to simulate in one forward call step_size: float Integration step for forward model - sampling_size: - This is related to an averaging of NMM values before inputing into hemodynamic equaitons. This is non-standard. tr : float tr of fMRI image. That is, the spacing betweeen images in the time series. 
sc: float node_size x node_size array @@ -118,23 +109,14 @@ def __init__(self, node_size: int, use_fit_gains: bool Whether to fit the structural connectivity, will fit via connection gains: exp(gains_con)*sc params: ParamsRWW - A object that contains the parameters for the RWW nodes - use_Bifurcation: bool - Use a custom objective function component - use_Gaussian_EI: bool - Use a custom objective function component - use_Laplacian: bool - Whether to use the negative laplacian of the (fitted) structural connectivity as the structural connectivity - use_dynamic_boundary: bool - Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size. + A object that contains the parameters for the RWW nodes. """ method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types - super(RNNRWW, self).__init__() + super(RNNRWW, self).__init__(params) self.state_names = ['E', 'I', 'x', 'f', 'v', 'q'] self.output_names = ["bold"] - self.track_params = [] #Is populated during setModelParameters() self.model_name = "RWW" self.state_size = 6 # 6 states WWD model @@ -144,42 +126,16 @@ def __init__(self, node_size: int, self.steps_per_TR = int(tr / step_size) self.TRs_per_window = TRs_per_window # size of the batch used at each step self.node_size = node_size # num of ROI - self.sampling_size = sampling_size self.sc = sc # matrix node_size x node_size structure connectivity self.sc_fitted = torch.tensor(sc, dtype=torch.float32) # placeholder self.use_fit_gains = use_fit_gains # flag for fitting gains - self.use_Laplacian = use_Laplacian - self.use_Bifurcation = use_Bifurcation - self.use_Gaussian_EI = use_Gaussian_EI - self.use_dynamic_boundary = use_dynamic_boundary - self.params = params - self.params_fitted = {} - + self.output_size = node_size self.setModelParameters() + self.setModelSCParameters() - def info(self): - """ - - A function that returns a dictionary with model information. - - Parameters - ---------- - - None - - - Returns - ---------- - - Dictionary of Lists - The List contain State Names and Output Names - - - """ - return {"state_names": ['E', 'I', 'x', 'f', 'v', 'q'], "output_names": ["bold"]} def createIC(self, ver): """ @@ -206,33 +162,45 @@ def createIC(self, ver): return torch.tensor(0.2 * np.random.uniform(0, 1, (self.node_size, self.state_size)) + np.array( [0, 0, 0, 1.0, 1.0, 1.0]), dtype=torch.float32) - def setModelParameters(self): + + def createDelayIC(self, ver): """ - - A function that assigns model parameters as model attributes and also to assign parameters and hyperparameters for fitting, - so that the inherited Torch functionality can be used. - This practice may be replaced soon. + Creates the initial conditions for the delays. - Parameters ---------- - - None - - + ver : int + Initial condition version. (in the JR model, the version is not used. It is just for consistency with other models) + Returns - ---------- + ------- + torch.Tensor + Tensor of shape (node_size, delays_max) with random values between `state_lb` and `state_ub`. + """ + + delays_max = 500 + state_ub = 0.5 + state_lb = 0.1 + + return torch.tensor(np.random.uniform(state_lb, state_ub, (self.node_size, delays_max)), dtype=torch.float32) + + def setModelSCParameters(self): - Dictionary of Lists - Keys are State Names and Output Names (with _window appended to the name) - Contents are the time series from model simulation + """ + Sets the parameters of the model. 
+ """ + - """ - - - # set states E I f v mean and 1/sqrt(variance) - return setModelParameters(self) + # Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True + if self.use_fit_gains: + + self.w_ll = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the lateral gains + dtype=torch.float32)) + self.params_fitted['modelparameter'].append(self.w_ll) + else: + self.w_ll = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32) + def forward(self, external, hx, hE): """ @@ -240,7 +208,7 @@ def forward(self, external, hx, hE): Parameters ---------- - external: tensor with node_size x steps_per_TR x TRs_per_window x input_size + external: tensor with node_size x pop_size x steps_per_TR x TRs_per_window x input_size noise for states hx: tensor with node_size x state_size @@ -251,198 +219,108 @@ def forward(self, external, hx, hE): next_state: dictionary with Tensors Tensor dimension [Num_Time_Points, Num_Regions] - with keys: 'current_state''bold_window''E_window''I_window''x_window''f_window''v_window''q_window' + with keys: 'current_state''states_window''bold_window' record new states and BOLD """ - - return integration_forward(self, external, hx, hE) - -def h_tf(a, b, d, z): - """ - Neuronal input-output functions of excitatory pools and inhibitory pools. - Take the variables a, x, and b and convert them to a linear equation (a*x - b) while adding a small - amount of noise 0.00001 while dividing that term to an exponential of the linear equation multiplied by the - d constant for the appropriate dimensions. - """ - num = 0.00001 + torch.abs(a * z - b) - den = 0.00001 * d + torch.abs(1.0000 - torch.exp(-d * (a * z - b))) - return torch.divide(num, den) - -def setModelParameters(model): - param_reg = [] #NMM Equation Parameters - param_hyper = [] #Mean and Variance of NMM Equation Parameters, and others + # Generate the ReLU module for model parameters gEE gEI and gIE + m = torch.nn.ReLU() - if model.use_Gaussian_EI: - model.E_m = Parameter(torch.tensor(0.16, dtype=torch.float32)) - param_hyper.append(model.E_m) - model.I_m = Parameter(torch.tensor(0.1, dtype=torch.float32)) - param_hyper.append(model.I_m) - # model.f_m = Parameter(torch.tensor(1.0, dtype=torch.float32)) - model.v_m = Parameter(torch.tensor(1.0, dtype=torch.float32)) - param_hyper.append(model.v_m) - # model.x_m = Parameter(torch.tensor(0.16, dtype=torch.float32)) - model.q_m = Parameter(torch.tensor(1.0, dtype=torch.float32)) - param_hyper.append(model.q_m) - - model.E_v_inv = Parameter(torch.tensor(2500, dtype=torch.float32)) - param_hyper.append(model.E_v_inv) - model.I_v_inv = Parameter(torch.tensor(2500, dtype=torch.float32)) - param_hyper.append(model.I_v_inv) - # model.f_v = Parameter(torch.tensor(100, dtype=torch.float32)) - model.v_v_inv = Parameter(torch.tensor(100, dtype=torch.float32)) - param_hyper.append(model.v_v_inv) - # model.x_v = Parameter(torch.tensor(100, dtype=torch.float32)) - model.q_v_inv = Parameter(torch.tensor(100, dtype=torch.float32)) - param_hyper.append(model.v_v_inv) - - # hyper parameters (variables: need to calculate gradient) to fit density - # of gEI and gIE (the shape from the bifurcation analysis on an isolated node) - if model.use_Bifurcation: - model.sup_ca = Parameter(torch.tensor(0.5, dtype=torch.float32)) - param_hyper.append(model.sup_ca) - model.sup_cb = Parameter(torch.tensor(20, dtype=torch.float32)) - param_hyper.append(model.sup_cb) - model.sup_cc = Parameter(torch.tensor(10, dtype=torch.float32)) - 
param_hyper.append(model.sup_cc) - - # set gains_con as Parameter if fit_gain is True - if model.use_fit_gains: - model.gains_con = Parameter(torch.tensor(np.zeros((model.node_size, model.node_size)) + 0.05, - dtype=torch.float32)) # connenction gain to modify empirical sc - param_reg.append(model.gains_con) - else: - model.gains_con = torch.tensor(np.zeros((model.node_size, model.node_size)), dtype=torch.float32) - - var_names = [a for a in dir(model.params) if not a.startswith('__')] - for var_name in var_names: - var = getattr(model.params, var_name) - if (type(var) == par): - if (var.fit_hyper == True): - var.randSet() #TODO: This should be done before giving params to model class - param_hyper.append(var.prior_mean) - param_hyper.append(var.prior_var) #TODO: Currently this is _v_inv but should set everything to just variance unless there is a reason to keep the inverse? - - if (var.fit_par == True): - param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first - model.track_params.append(var_name) - - if (var.fit_par | var.fit_hyper): - model.track_params.append(var_name) #NMM Parameters - - model.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper} - -def integration_forward(model, external, hx, hE): - - # Generate the ReLU module for model parameters gEE gEI and gIE - m = torch.nn.ReLU() - - - # Defining NMM Parameters to simplify later equations - std_in = (0.02 + m(model.params.std_in.value())) # standard deviation of the Gaussian noise - std_out = (0.00 + m(model.params.std_out.value())) # standard deviation of the Gaussian noise - # Parameters for the ODEs - # Excitatory population - W_E = model.params.W_E.value() # scale of the external input - tau_E = model.params.tau_E.value() # decay time - gamma_E = model.params.gamma_E.value() # other dynamic parameter (?) - - # Inhibitory population - W_I = model.params.W_I.value() # scale of the external input - tau_I = model.params.tau_I.value() # decay time - gamma_I = model.params.gamma_I.value() # other dynamic parameter (?) 
- - # External input - I_0 = model.params.I_0.value() # external input - I_external = model.params.I_external.value() # external stimulation - - # Coupling parameters - g = model.params.g.value() # global coupling (from all nodes E_j to single node E_i) - g_EE = (0.001 + m(model.params.g_EE.value())) # local self excitatory feedback (from E_i to E_i) - g_IE = (0.001 + m(model.params.g_IE.value())) # local inhibitory coupling (from I_i to E_i) - g_EI = (0.001 + m(model.params.g_EI.value())) # local excitatory coupling (from E_i to I_i) - - aE = model.params.aE.value() - bE = model.params.bE.value() - dE = model.params.dE.value() - aI = model.params.aI.value() - bI = model.params.bI.value() - dI = model.params.dI.value() - - # Output (BOLD signal) - alpha = model.params.alpha.value() - rho = model.params.rho.value() - k1 = model.params.k1.value() - k2 = model.params.k2.value() - k3 = model.params.k3.value() # adjust this number from 0.48 for BOLD fluctruate around zero - V = model.params.V.value() - E0 = model.params.E0.value() - tau_s = model.params.tau_s.value() - tau_f = model.params.tau_f.value() - tau_0 = model.params.tau_0.value() - mu = model.params.mu.value() - - - next_state = {} - - # hx is current state (6) 0: E 1:I (neural activities) 2:x 3:f 4:v 5:f (BOLD) - - x = hx[:, 2:3] - f = hx[:, 3:4] - v = hx[:, 4:5] - q = hx[:, 5:6] - - dt = torch.tensor(model.step_size, dtype=torch.float32) - - # Update the Laplacian based on the updated connection gains gains_con. - if model.sc.shape[0] > 1: - + # Defining NMM Parameters to simplify later equations + std_in = 0.02+m(self.params.std_in.value()) # 0.02 the lower bound (standard deviation of the Gaussian noise) + + # Parameters for the ODEs + # Excitatory population + W_E = self.params.W_E.value() # scale of the external input + tau_E = self.params.tau_E.value() # decay time + gamma_E = self.params.gamma_E.value() # other dynamic parameter (?) + + # Inhibitory population + W_I = self.params.W_I.value() # scale of the external input + tau_I = self.params.tau_I.value() # decay time + gamma_I = self.params.gamma_I.value() # other dynamic parameter (?) 
+ + # External input + I_0 = self.params.I_0.value() # external input + I_external = self.params.I_external.value() # external stimulation + + # Coupling parameters + g = self.params.g.value() # global coupling (from all nodes E_j to single node E_i) + g_EE = m(self.params.g_EE.value()) # local self excitatory feedback (from E_i to E_i) + g_IE = m(self.params.g_IE.value()) # local inhibitory coupling (from I_i to E_i) + g_EI = m(self.params.g_EI.value()) # local excitatory coupling (from E_i to I_i) + + aE = self.params.aE.value() + bE = self.params.bE.value() + dE = self.params.dE.value() + aI = self.params.aI.value() + bI = self.params.bI.value() + dI = self.params.dI.value() + + # Output (BOLD signal) + alpha = self.params.alpha.value() + rho = self.params.rho.value() + k1 = self.params.k1.value() + k2 = self.params.k2.value() + k3 = self.params.k3.value() # adjust this number from 0.48 for BOLD fluctruate around zero + V = self.params.V.value() + E0 = self.params.E0.value() + tau_s = self.params.tau_s.value() + tau_f = self.params.tau_f.value() + tau_0 = self.params.tau_0.value() + mu = self.params.mu.value() + + + next_state = {} + + # hx is current state (6) 0: E 1:I (neural activities) 2:x 3:f 4:v 5:f (BOLD) + + x = hx[:,2:3] + f = hx[:,3:4] + v = hx[:,4:5] + q = hx[:,5:6] + + dt = torch.tensor(self.step_size, dtype=torch.float32) + # Update the Laplacian based on the updated connection gains gains_con. - sc_mod = torch.exp(model.gains_con) * torch.tensor(model.sc, dtype=torch.float32) - sc_mod_normalized = (0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) / torch.linalg.norm( - 0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) - model.sc_fitted = sc_mod_normalized - - if model.use_Laplacian: + if self.sc.shape[0] > 1: + + # Update the Laplacian based on the updated connection gains gains_con. + sc_mod = torch.exp(self.w_ll) * torch.tensor(self.sc, dtype=torch.float32) + sc_mod_normalized = (0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) / torch.linalg.norm( + 0.5 * (sc_mod + torch.transpose(sc_mod, 0, 1))) + self.sc_fitted = sc_mod_normalized + lap_adj = -torch.diag(sc_mod_normalized.sum(1)) + sc_mod_normalized + else: - lap_adj = sc_mod_normalized - - else: - lap_adj = torch.tensor(np.zeros((1, 1)), dtype=torch.float32) - - # placeholder for the updated current state - current_state = torch.zeros_like(hx) - - # placeholders for output BOLD, history of E I x f v and q - # placeholders for output BOLD, history of E I x f v and q - bold_window = torch.zeros((model.node_size, model.TRs_per_window)) - # E_window = torch.zeros((model.node_size,model.TRs_per_window)) - # I_window = torch.zeros((model.node_size,model.TRs_per_window)) - - x_window = torch.zeros((model.node_size, model.TRs_per_window)) - f_window = torch.zeros((model.node_size, model.TRs_per_window)) - v_window = torch.zeros((model.node_size, model.TRs_per_window)) - q_window = torch.zeros((model.node_size, model.TRs_per_window)) - - E_hist = torch.zeros((model.node_size, model.TRs_per_window, model.steps_per_TR)) - I_hist = torch.zeros((model.node_size, model.TRs_per_window, model.steps_per_TR)) - E_mean = hx[:, 0:1] - I_mean = hx[:, 1:2] - - # Use the forward model to get neural activity at ith element in the window. 
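# A rough standalone illustration (not the patch's code) of the coupling matrix assembled
# above: the fitted gains scale the empirical SC, the result is symmetrized and normalized,
# and the negative Laplacian is formed so that every row sums to (approximately) zero.
import numpy as np
import torch

sc = np.abs(np.random.randn(3, 3))                       # toy structural connectivity
np.fill_diagonal(sc, 0.0)
w_ll = torch.zeros(3, 3) + 0.05                          # connection gains (fitted in the model)
sc_mod = torch.exp(w_ll) * torch.tensor(sc, dtype=torch.float32)
sc_sym = 0.5 * (sc_mod + sc_mod.T)
sc_norm = sc_sym / torch.linalg.norm(sc_sym)
lap_adj = -torch.diag(sc_norm.sum(1)) + sc_norm
print(lap_adj.sum(1))                                    # rows of the Laplacian sum to ~0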
- if model.use_dynamic_boundary: - for TR_i in range(model.TRs_per_window): + lap_adj = torch.tensor(np.zeros((1, 1)), dtype=torch.float32) + + + + # placeholders for output BOLD, history of E I x f v and q + # placeholders for output BOLD, history of E I x f v and q + bold_window = [] + E_window = [] + I_window = [] + x_window = [] + f_window = [] + q_window = [] + v_window = [] + + + + E = hx[:,0:1] + I = hx[:,1:2] + #print(E.shape) + # Use the forward model to get neural activity at ith element in the window. + + for TR_i in range(self.TRs_per_window): # Since tr is about second we need to use a small step size like 0.05 to integrate the model states. - for step_i in range(model.steps_per_TR): - E = torch.zeros((model.node_size, model.sampling_size)) - I = torch.zeros((model.node_size, model.sampling_size)) - for sample_i in range(model.sampling_size): - E[:, sample_i] = E_mean[:, 0] + 0.02 * torch.randn(model.node_size) - I[:, sample_i] = I_mean[:, 0] + 0.001 * torch.randn(model.node_size) - + for step_i in range(self.steps_per_TR): + # Calculate the input recurrent. IE = torch.tanh(m(W_E * I_0 + g_EE * E + g * torch.matmul(lap_adj, E) - g_IE * I)) # input currents for E II = torch.tanh(m(W_I * I_0 + g_EI * E - I)) # input currents for I @@ -453,9 +331,9 @@ def integration_forward(model, external, hx, hE): # Update the states by step-size 0.05. E_next = E + dt * (-E * torch.reciprocal(tau_E) + gamma_E * (1. - E) * rE) \ - + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in ### equlibrim point at E=(tau_E*gamma_E*rE)/(1+tau_E*gamma_E*rE) + + torch.sqrt(dt) * torch.randn(self.node_size, 1) * std_in I_next = I + dt * (-I * torch.reciprocal(tau_I) + gamma_I * rI) \ - + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in + + torch.sqrt(dt) * torch.randn(self.node_size, 1) * std_in # Calculate the saturation for model states (for stability and gradient calculation). @@ -463,118 +341,58 @@ def integration_forward(model, external, hx, hE): E = torch.tanh(0.0000 + m(1.0 * E_next)) I = torch.tanh(0.0000 + m(1.0 * I_next)) - I_mean = I.mean(1)[:, np.newaxis] - E_mean = E.mean(1)[:, np.newaxis] - I_mean[I_mean < 0.00001] = 0.00001 - E_mean[E_mean < 0.00001] = 0.00001 - - E_hist[:, TR_i, step_i] = E_mean[:, 0] - I_hist[:, TR_i, step_i] = I_mean[:, 0] - - for TR_i in range(model.TRs_per_window): + - for step_i in range(model.steps_per_TR): - x_next = x + 1 * dt * (1 * E_hist[:, TR_i, step_i][:, np.newaxis] - torch.reciprocal(tau_s) * x \ + + x_next = x + 1 * dt * (1 * E - torch.reciprocal(tau_s) * x \ - torch.reciprocal(tau_f) * (f - 1)) f_next = f + 1 * dt * x v_next = v + 1 * dt * (f - torch.pow(v, torch.reciprocal(alpha))) * torch.reciprocal(tau_0) q_next = q + 1 * dt * (f * (1 - torch.pow(1 - rho, torch.reciprocal(f))) * torch.reciprocal(rho) \ - q * torch.pow(v, torch.reciprocal(alpha)) * torch.reciprocal(v)) * torch.reciprocal(tau_0) - + x = torch.tanh(x_next) f = (1 + torch.tanh(f_next - 1)) v = (1 + torch.tanh(v_next - 1)) q = (1 + torch.tanh(q_next - 1)) - + # Put x f v q from each tr to the placeholders for checking them visually. - x_window[:, TR_i] = x[:, 0] - f_window[:, TR_i] = f[:, 0] - v_window[:, TR_i] = v[:, 0] - q_window[:, TR_i] = q[:, 0] + E_window.append(E) + I_window.append(I) + x_window.append(x) + f_window.append(f) + v_window.append(v) + q_window.append(q) # Put the BOLD signal each tr to the placeholder being used in the cost calculation. 
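# For intuition, one Euler step of the Balloon-Windkessel hemodynamics integrated above can
# be written standalone as below; parameter values are illustrative, not taken from ParamsRWW.
import torch

dt, tau_s, tau_f, tau_0, alpha, rho = 0.05, 0.65, 0.41, 0.98, 0.32, 0.34
E = 0.2 * torch.rand(8, 1)                                   # synaptic activity driving the response
x, f, v, q = torch.zeros(8, 1), torch.ones(8, 1), torch.ones(8, 1), torch.ones(8, 1)

x = x + dt * (E - x / tau_s - (f - 1) / tau_f)               # vasodilatory signal
f = f + dt * x                                               # blood inflow
v = v + dt * (f - v ** (1 / alpha)) / tau_0                  # blood volume
q = q + dt * (f * (1 - (1 - rho) ** (1 / f)) / rho
              - q * v ** (1 / alpha) / v) / tau_0            # deoxyhemoglobin content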
- bold_window[:, TR_i] = (std_out * torch.randn(model.node_size, 1) + + bold_window.append((0.01 * torch.randn(self.node_size, 1) + 100.0 * V * torch.reciprocal(E0) * - (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v)))[:, 0] - else: - - for TR_i in range(model.TRs_per_window): - - # Since tr is about second we need to use a small step size like 0.05 to integrate the model states. - for step_i in range(model.steps_per_TR): - E = torch.zeros((model.node_size, model.sampling_size)) - I = torch.zeros((model.node_size, model.sampling_size)) - for sample_i in range(model.sampling_size): - E[:, sample_i] = E_mean[:, 0] + 0.001 * torch.randn(model.node_size) - I[:, sample_i] = I_mean[:, 0] + 0.001 * torch.randn(model.node_size) - - # Calculate the input recurrent. - IE = 1 * torch.tanh(m(W_E * I_0 + g_EE * E + g * torch.matmul(lap_adj, E) - g_IE * I)) # input currents for E - II = 1 * torch.tanh(m(W_I * I_0 + g_EI * E - I)) # input currents for I - - # Calculate the firing rates. - rE = h_tf(aE, bE, dE, IE) # firing rate for E - rI = h_tf(aI, bI, dI, II) # firing rate for I - - # Update the states by step-size dt. - E_next = E + dt * (-E * torch.reciprocal(tau_E) + gamma_E * (1. - E) * rE) \ - + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in ### equlibrim point at E=(tau_E*gamma_E*rE)/(1+tau_E*gamma_E*rE) - I_next = I + dt * (-I * torch.reciprocal(tau_I) + gamma_I * rI) \ - + torch.sqrt(dt) * torch.randn(model.node_size, model.sampling_size) * std_in - - # Calculate the saturation for model states (for stability and gradient calculation). - E_next[E_next < 0.00001] = 0.00001 - I_next[I_next < 0.00001] = 0.00001 - # E_next[E_next>=0.9] = torch.tanh(1.6358*E_next[E_next>=0.9]) - E = E_next # torch.tanh(0.00001+m(1.0*E_next)) - I = I_next # torch.tanh(0.00001+m(1.0*I_next)) - - I_mean = I.mean(1)[:, np.newaxis] - E_mean = E.mean(1)[:, np.newaxis] - E_hist[:, TR_i, step_i] = torch.tanh(E_mean)[:, 0] - I_hist[:, TR_i, step_i] = torch.tanh(I_mean)[:, 0] - - # E_window[:,TR_i]=E_mean[:,0] - # I_window[:,TR_i]=I_mean[:,0] - - for TR_i in range(model.TRs_per_window): - - for step_i in range(model.steps_per_TR): - x_next = x + 1 * dt * (1 * E_hist[:, TR_i, step_i][:, np.newaxis] - torch.reciprocal(tau_s) * x \ - - torch.reciprocal(tau_f) * (f - 1)) - f_next = f + 1 * dt * x - v_next = v + 1 * dt * (f - torch.pow(v, torch.reciprocal(alpha))) * torch.reciprocal(tau_0) - q_next = q + 1 * dt * (f * (1 - torch.pow(1 - rho, torch.reciprocal(f))) * torch.reciprocal(rho) \ - - q * torch.pow(v, torch.reciprocal(alpha)) * torch.reciprocal(v)) * torch.reciprocal(tau_0) - - f_next[f_next < 0.001] = 0.001 - v_next[v_next < 0.001] = 0.001 - q_next[q_next < 0.001] = 0.001 - x = x_next # torch.tanh(x_next) - f = f_next # (1 + torch.tanh(f_next - 1)) - v = v_next # (1 + torch.tanh(v_next - 1)) - q = q_next # (1 + torch.tanh(q_next - 1)) - - # Put x f v q from each tr to the placeholders for checking them visually. - x_window[:, TR_i] = x[:, 0] - f_window[:, TR_i] = f[:, 0] - v_window[:, TR_i] = v[:, 0] - q_window[:, TR_i] = q[:, 0] - - # Put the BOLD signal each tr to the placeholder being used in the cost calculation. - bold_window[:, TR_i] = (std_out * torch.randn(model.node_size, 1) + - 100.0 * V * torch.reciprocal(E0) * - (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v)))[:, 0] - - # Update the current state. 
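# The BOLD sample itself then follows the Balloon-Windkessel observation equation used just
# above; a minimal sketch with illustrative constants (not the values from ParamsRWW), and
# with the small Gaussian noise term the model adds to each sample omitted here.
import torch

V, E0, k1, k2, k3 = 0.02, 0.34, 2.38, 2.0, 0.48              # illustrative observation constants
v = 1.0 + 0.02 * torch.rand(8, 1)                            # blood volume
q = 1.0 - 0.02 * torch.rand(8, 1)                            # deoxyhemoglobin content
bold = 100.0 * V / E0 * (k1 * (1 - q) + k2 * (1 - q / v) + k3 * (1 - v))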
- current_state = torch.cat([E_mean, I_mean, x, f, v, q], dim=1) - next_state['current_state'] = current_state - next_state['bold'] = bold_window - next_state['E'] = E_hist.reshape((model.node_size, -1)) - next_state['I'] = I_hist.reshape((model.node_size, -1)) - next_state['x'] = x_window - next_state['f'] = f_window - next_state['v'] = v_window - next_state['q'] = q_window + (k1 * (1 - q) + k2 * (1 - q * torch.reciprocal(v)) + k3 * (1 - v)))) + + + # Update the current state. + current_state = torch.cat([E, I, x,\ + f, v, q], dim=1) + next_state['current_state'] = current_state + next_state['bold'] = torch.cat(bold_window, dim =1) + next_state['E'] = torch.cat(E_window, dim =1) + next_state['I'] = torch.cat(I_window, dim =1) + next_state['x'] = torch.cat(x_window, dim =1) + next_state['f'] = torch.cat(f_window, dim =1) + next_state['v'] = torch.cat(v_window, dim =1) + next_state['q'] = torch.cat(q_window, dim =1) + + return next_state, hE + + - return next_state, hE \ No newline at end of file +def h_tf(a, b, d, z): + """ + Neuronal input-output functions of excitatory pools and inhibitory pools. + Take the variables a, x, and b and convert them to a linear equation (a*x - b) while adding a small + amount of noise 0.00001 while dividing that term to an exponential of the linear equation multiplied by the + d constant for the appropriate dimensions. + """ + num = 0.00001 + torch.abs(a * z - b) + den = 0.00001 * d + torch.abs(1.0000 - torch.exp(-d * (a * z - b))) + return torch.divide(num, den) \ No newline at end of file diff --git a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py index 3b027d56..87c10a76 100644 --- a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py +++ b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py @@ -6,24 +6,26 @@ from whobpyt.models.BOLD import BOLD_Layer, BOLD_Params from whobpyt.models.EEG import EEG_Layer, EEG_Params -class RWWEI2_EEG_BOLD(RWWEI2): +class RWWEI2_EEG_BOLD(torch.nn.Module): model_name = "RWWEI2_EEG_BOLD" def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD, Con_Mtx, dist_mtx, step_size, sim_len, device = torch.device('cpu')): - + super(RWWEI2_EEG_BOLD, self).__init__() # To inherit parameters attribute self.eeg = EEG_Layer(num_regions, paramsEEG, num_channels, device = device) self.bold = BOLD_Layer(num_regions, paramsBOLD, device = device) - super(RWWEI2_EEG_BOLD, self).__init__(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, useBC = False, device = device) - + self.NMM = RWWEI2(num_regions, paramsNode, Con_Mtx, dist_mtx, step_size, sim_len=sim_len, useBC = False, device = device) + self.params = paramsNode self.node_size = num_regions self.step_size = step_size self.sim_len = sim_len - + self.output_size = num_regions self.device = device self.batch_size = 1 - + self.use_fit_gains = self.NMM.use_fit_gains # flag for fitting gains + self.use_fit_lfm = self.NMM.use_fit_lfm + self.useBC =self.NMM.useBC self.tr = sim_len self.steps_per_TR = 1 self.TRs_per_window = 1 @@ -31,64 +33,66 @@ def __init__(self, num_regions, num_channels, paramsNode, paramsEEG, paramsBOLD, self.state_names = ['E', 'I', 'x', 'f', 'v', 'q'] self.output_names = ["bold", "eeg"] self.track_params = [] #Is populated during setModelParameters() + self.params_fitted={} + self.params_fitted['modelparameter'] =[] + self.params_fitted['hyperparameter'] =[] - self.setModelParameters() - + self.NMM.setModelParameters() + + self.eeg.setModelParameters() + self.bold.setModelParameters() + + self.params_fitted['modelparameter'] 
= self.NMM.params_fitted['modelparameter'] + self.eeg.params_fitted['modelparameter'] +self.bold.params_fitted['modelparameter'] + self.params_fitted['hyperparameter'] = self.NMM.params_fitted['hyperparameter'] + self.eeg.params_fitted['hyperparameter'] +self.bold.params_fitted['hyperparameter'] + self.track_params = self.NMM.track_params + self.eeg.track_params +self.bold.track_params def info(self): return {"state_names": ['E', 'I', 'x', 'f', 'v', 'q'], "output_names": ["bold", "eeg"]} #TODO: Update to take multiple output names - def setModelParameters(self): - return setModelParameters(self) + def createIC(self, ver): - return createIC(self, ver) + self.NMM.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) \ + + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size) + self.NMM.next_start_state = self.NMM.next_start_state.to(self.device) + + return self.NMM.next_start_state + + def createDelayIC(self, ver): + # Creates a time series of state variables to represent their past values as needed when delays are used. + + return torch.tensor(1.0) #Dummy variable if delays are not used + def setBlocks(self, num_blocks): self.num_blocks = num_blocks self.eeg.num_blocks = num_blocks self.bold.num_blocks = num_blocks - def forward(self, external, hx, hE, setNoise = None): - return forward(self, external, hx, hE, setNoise) - -def setModelParameters(self): - super(RWWEI2_EEG_BOLD, self).setModelParameters() # Currently this is the only one with parameters being fitted - self.eeg.setModelParameters() - self.bold.setModelParameters() - -def createIC(self, ver): - #super(RWWEI2_EEG_BOLD, self).createIC() - #self.eeg.createIC() - #self.bold.createIC() - - self.next_start_state = 0.2 * torch.rand((self.node_size, 6, self.batch_size)) + torch.tensor([[0], [0], [0], [1.0], [1.0], [1.0]]).repeat(self.node_size, 1, self.batch_size) - self.next_start_state = self.next_start_state.to(self.device) + def forward(self, external, hx, hE, setNoise=None): + + NMM_vals, hE = self.NMM.forward(external, self.NMM.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future + print(NMM_vals['E'].shape) + EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2))) + BOLD_vals, hE = self.bold.forward(self.NMM.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2))) + + self.NMM.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach() + + sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals} + sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable + + # Reshape if Blocking is being Used + if self.NMM.num_blocks > 1: + for simKey in set(self.state_names + self.output_names): + #print(sim_vals[simKey].shape) # Nodes x Time x Blocks + #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks + sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0] + else: + for simKey in set(self.state_names + self.output_names): + sim_vals[simKey] = sim_vals[simKey][:,:,0] + + return sim_vals, hE - return self.next_start_state - -def forward(self, external, hx, hE, setNoise): - - NMM_vals, hE = super(RWWEI2_EEG_BOLD, self).forward(external, self.next_start_state[:, 0:2, :], hE, setNoise) #TODO: Fix the hx in the future - EEG_vals, hE = self.eeg.forward(self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2))) - 
BOLD_vals, hE = self.bold.forward(self.next_start_state[:, 2:6, :], self.step_size, self.sim_len, NMM_vals["E"].permute((1,0,2))) - - self.next_start_state = torch.cat((NMM_vals["NMM_state"], BOLD_vals["BOLD_state"]), dim=1).detach() - - sim_vals = {**NMM_vals, **EEG_vals, **BOLD_vals} - sim_vals['current_state'] = torch.tensor(1.0, device = self.device) #Dummy variable - - # Reshape if Blocking is being Used - if self.num_blocks > 1: - for simKey in set(self.state_names + self.output_names): - #print(sim_vals[simKey].shape) # Nodes x Time x Blocks - #print(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2).shape) #Time x Nodes x SV x Blocks - sim_vals[simKey] = serializeTS(torch.unsqueeze(sim_vals[simKey].permute((1,0,2)),2), sim_vals[simKey].shape[0], 1).permute((1,0,2))[:,:,0] - else: - for simKey in set(self.state_names + self.output_names): - sim_vals[simKey] = sim_vals[simKey][:,:,0] - - return sim_vals, hE def blockTS(data, blocks, numNodes, numSV): diff --git a/whobpyt/models/RWWEI2/RWWEI2.py b/whobpyt/models/RWWEI2/RWWEI2.py index de2fc22d..478c42b3 100644 --- a/whobpyt/models/RWWEI2/RWWEI2.py +++ b/whobpyt/models/RWWEI2/RWWEI2.py @@ -1,8 +1,9 @@ import torch from whobpyt.datatypes import AbstractNMM, AbstractParams, par +from torch.nn.parameter import Parameter from math import sqrt -class RWWEI2(AbstractNMM): +class RWWEI2(torch.nn.Module): ''' Reduced Wong Wang Excitatory Inhibatory (RWWEXcInh) Model - Version 2 @@ -112,13 +113,45 @@ def info(self): return {"state_names": ['E', 'I'], "output_names": ['E']} def setModelParameters(self): - return setModelParameters(self) + # NOTE: Parameters stored in self.params + + # To make the parameters work with modelfitting.py + param_reg = [torch.nn.Parameter(torch.tensor(1., device = self.device))] + param_hyper = [torch.nn.Parameter(torch.tensor(1., device = self.device))] + vars_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)] + for var_name in vars_names: + var = getattr(self.params, var_name) + if (var.fit_par): + if var_name == 'lm': + size = var.val.shape + var.val = Parameter(- 1 * torch.ones((size[0], size[1]))) + var.prior_mean = Parameter(var.prior_mean) + var.prior_var = Parameter(var.prior_var) + param_reg.append(var.val) + if var.fit_hyper: + param_hyper.append(var.prior_mean) + param_hyper.append(var.prior_var) + self.track_params.append(var_name) + else: + var.val = Parameter(var.val) # TODO: This is not consistent with what user would expect giving a variance + var.prior_mean = Parameter(var.prior_mean) + var.prior_var = Parameter(var.prior_var) + param_reg.append(var.val) + if var.fit_hyper: + param_hyper.append(var.prior_mean) + param_hyper.append(var.prior_var) + self.track_params.append(var_name) + self.params_fitted = {'modelparameter': param_reg, 'hyperparameter': param_hyper} def createIC(self, ver): self.next_start_state = torch.tensor(0.1) + 0.2 * torch.rand((self.node_size, 2, self.num_blocks)) self.next_start_state = self.next_start_state.to(self.device) return self.next_start_state + def createDelayIC(self, ver): + # Creates a time series of state variables to represent their past values as needed when delays are used. 
+        
+        return torch.tensor(1.0) #Dummy variable if delays are not used
 
     def setBlocks(self, num_blocks):
         self.num_blocks = num_blocks
@@ -152,228 +185,211 @@ def genNoise(self, block_len, batched = False):
     def forward(self, external, hx, hE, setNoise = None, batched = False):
         '''
         '''
-        return forward(self, external, hx, hE, setNoise, batched)
-
-def setModelParameters(self):
-    
-    # NOTE: Parameters stored in self.params
-    
-    # To make the parameters work with modelfitting.py
-    param_reg = [torch.nn.Parameter(torch.tensor(1., device = self.device))]
-    param_hyper = [torch.nn.Parameter(torch.tensor(1., device = self.device))]
-    vars_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
-    for var_name in vars_names:
-        var = getattr(self.params, var_name)
-        if (var.fit_par):
-            param_reg.append(var.val)
-            self.track_params.append(var_name)
-    self.params_fitted = {'modelparameter': param_reg, 'hyperparameter': param_hyper}
-
-
-
-def forward(self, external, hx, hE, setNoise, batched):
+        # Some documentation to be written...
-    # Some documentation to be written...
-    
-    # Defining NMM Parameters to simplify later equations
-    G = self.params.G.value()
-    Lambda = self.params.Lambda.value() # 1 or 0 depending on using long range feed forward inhibition (FFI)
-    a_E = self.params.a_E.value() # nC^(-1)
-    b_E = self.params.b_E.value() # Hz
-    d_E = self.params.d_E.value() # s
-    tau_E = self.params.tau_E.value() # ms
-    tau_NMDA = self.params.tau_NMDA.value() # ms
-    W_E = self.params.W_E.value()
-    a_I = self.params.a_I.value() # nC^(-1)
-    b_I = self.params.b_I.value() # Hz
-    d_I = self.params.d_I.value() # s
-    tau_I = self.params.tau_I.value() # ms
-    tau_GABA = self.params.tau_GABA.value() # ms
-    W_I = self.params.W_I.value()
-    w_plus = self.params.w_plus.value() # Local excitatory recurrence
-    J_NMDA = self.params.J_NMDA.value() # Excitatory synaptic coupling in nA
-    J = torch.unsqueeze(self.params.J.value(),1) # NOTE: Extra dimension allows for local tuning if applicable # Local feedback inhibitory synaptic coupling. 1 in no-FIC case, different in FIC case #TODO: Currently set to J_NMDA but should calculate based on paper
-    gamma = self.params.gamma.value() # a kinetic parameter in ms
-    sig = self.params.sig.value() # 0.01 # Noise amplitude at node in nA
-    I_0 = self.params.I_0.value() # The overall effective external input in nA
-    I_external = self.params.I_external.value() #External input current
-    gammaI = self.params.gammaI.value()
-    J_new = self.params.J_new.value()
-    
-    const51=torch.tensor([51], device = self.device)
-    
-    def H_for_E_V3(I_E, update = False):
+        
+        # Defining NMM Parameters to simplify later equations
+        G = self.params.G.value()
+        Lambda = self.params.Lambda.value() # 1 or 0 depending on using long range feed forward inhibition (FFI)
+        a_E = self.params.a_E.value() # nC^(-1)
+        b_E = self.params.b_E.value() # Hz
+        d_E = self.params.d_E.value() # s
+        tau_E = self.params.tau_E.value() # ms
+        tau_NMDA = self.params.tau_NMDA.value() # ms
+        W_E = self.params.W_E.value()
+        a_I = self.params.a_I.value() # nC^(-1)
+        b_I = self.params.b_I.value() # Hz
+        d_I = self.params.d_I.value() # s
+        tau_I = self.params.tau_I.value() # ms
+        tau_GABA = self.params.tau_GABA.value() # ms
+        W_I = self.params.W_I.value()
+        w_plus = self.params.w_plus.value() # Local excitatory recurrence
+        J_NMDA = self.params.J_NMDA.value() # Excitatory synaptic coupling in nA
+        J = torch.unsqueeze(self.params.J.value(),1) # NOTE: Extra dimension allows for local tuning if applicable # Local feedback inhibitory synaptic coupling. 1 in no-FIC case, different in FIC case #TODO: Currently set to J_NMDA but should calculate based on paper
+        gamma = self.params.gamma.value() # a kinetic parameter in ms
+        sig = self.params.sig.value() # 0.01 # Noise amplitude at node in nA
+        I_0 = self.params.I_0.value() # The overall effective external input in nA
+        I_external = self.params.I_external.value() #External input current
+        gammaI = self.params.gammaI.value()
+        J_new = self.params.J_new.value()
-        numer = torch.abs(a_E*I_E - b_E) + 1e-9*1
-        denom = torch.where((-d_E*(a_E*I_E - b_E) > 50),
-                            torch.abs(1 - 1e9*(-d_E*(a_E*I_E - b_E))) + 1e-9*d_E,
-                            torch.abs(1 - torch.exp(torch.min(-d_E*(a_E*I_E - b_E), const51))) + 1e-9*d_E)
-        r_E = numer / denom
-        
-        return r_E
+        
+        const51=torch.tensor([51], device = self.device)
+        
+        def H_for_E_V3(I_E, update = False):
+            
+            numer = torch.abs(a_E*I_E - b_E) + 1e-9*1
+            denom = torch.where((-d_E*(a_E*I_E - b_E) > 50),
+                                torch.abs(1 - 1e9*(-d_E*(a_E*I_E - b_E))) + 1e-9*d_E,
+                                torch.abs(1 - torch.exp(torch.min(-d_E*(a_E*I_E - b_E), const51))) + 1e-9*d_E)
+            r_E = numer / denom
+            
+            return r_E
+        
+        def H_for_I_V3(I_I, update = False):
+            
+            numer = torch.abs(a_I*I_I - b_I) + 1e-9*1
+            denom = torch.where((-d_I*(a_I*I_I - b_I) > 50),
+                                torch.abs(1 - 1e5*(-d_I*(a_I*I_I - b_I))) + 1e-9*d_I,
+                                torch.abs(1 - torch.exp(torch.min(-d_I*(a_I*I_I - b_I), const51))) + 1e-9*d_I)
+            r_I = numer / denom
+            
+            return r_I
+        
+        
+        init_state = hx
+        sim_len = self.sim_len
+        useDelays = False
+        useLaplacian = False
+        withOptVars = False
+        debug = False
+        
+        
+        # Runs the RWW Model
+        #
+        # INPUT
+        #  init_state: Tensor [regions, state_vars] # Regions is number of nodes and should match self.num_regions. There are 2 state variables.
+        #  sim_len: Int - The length of time to simulate in msec
+        #  withOptVars: Boolean - Whether to include the Current and Firing rate variables of excitatory and inhibitory populations in layer_history
+        #
+        # OUTPUT
+        #  state_vars:  Tensor - [regions, state_vars]
+        #  layer_history: Tensor - [time_steps, regions, state_vars (+ opt_params)]
+        #
-    def H_for_I_V3(I_I, update = False):
+        if batched:
+            num_steps = int((sim_len/self.step_size))
+        else:
+            num_steps = int((sim_len/self.step_size)/self.num_blocks)
+        print(num_steps)
+        if setNoise is not None:
+            Ev = setNoise["E"]
+            Iv = setNoise["I"]
+            state_hist = torch.zeros(num_steps, self.num_regions, 2, self.num_blocks, device = self.device) #TODO: Deal with the dimensions
+            if(withOptVars):
+                opt_hist = torch.zeros(num_steps, self.num_regions, 4, self.num_blocks, device = self.device)
+        else:
+            Ev = torch.normal(0,1,size = (len(torch.arange(0, sim_len, self.step_size)), self.num_regions, self.num_blocks)).to(self.device)
+            Iv = torch.normal(0,1,size = (len(torch.arange(0, sim_len, self.step_size)), self.num_regions, self.num_blocks)).to(self.device)
+            state_hist = torch.zeros(int(sim_len/self.step_size), self.num_regions, 2, self.num_blocks).to(self.device)
+            if(withOptVars):
+                opt_hist = torch.zeros(int(sim_len/self.step_size), self.num_regions, 4, self.num_blocks).to(self.device)
-        numer = torch.abs(a_I*I_I - b_I) + 1e-9*1
-        denom = torch.where((-d_I*(a_I*I_I - b_I) > 50),
-                            torch.abs(1 - 1e5*(-d_I*(a_I*I_I - b_I))) + 1e-9*d_I,
-                            torch.abs(1 - torch.exp(torch.min(-d_I*(a_I*I_I - b_I), const51))) + 1e-9*d_I)
-        r_I = numer / denom
+        ones_row = torch.ones(1, self.num_blocks, device = self.device)
-        
-        return r_I
-    
-    
-    init_state = hx
-    sim_len = self.sim_len
-    useDelays = False
-    useLaplacian = False
-    withOptVars = False
-    debug = False
-    
+        # RWW and State Values
+        S_E = init_state[:, 0, :]
+        S_I = init_state[:, 1, :]
+        
+        for i in range(num_steps):
-    # Runs the RWW Model
-    #
-    # INPUT
-    #  init_state: Tensor [regions, state_vars] # Regions is number of nodes and should match self.num_regions. There are 2 state variables.
-    #  sim_len: Int - The length of time to simulate in msec
-    #  withOptVars: Boolean - Whether to include the Current and Firing rate variables of excitatory and inhibitory populations in layer_history
-    #
-    # OUTPUT
-    #  state_vars:  Tensor - [regions, state_vars]
-    #  layer_history: Tensor - [time_steps, regions, state_vars (+ opt_params)]
-    #
-    
-    if batched:
-        num_steps = int((sim_len/self.step_size))
-    else:
-        num_steps = int((sim_len/self.step_size)/self.num_blocks)
-    
-    if setNoise is not None:
-        Ev = setNoise["E"]
-        Iv = setNoise["I"]
-        state_hist = torch.zeros(num_steps, self.num_regions, 2, self.num_blocks, device = self.device) #TODO: Deal with the dimensions
-        if(withOptVars):
-            opt_hist = torch.zeros(num_steps, self.num_regions, 4, self.num_blocks, device = self.device)
-    else:
-        Ev = torch.normal(0,1,size = (len(torch.arange(0, sim_len, self.step_size)), self.num_regions, self.num_blocks)).to(self.device)
-        Iv = torch.normal(0,1,size = (len(torch.arange(0, sim_len, self.step_size)), self.num_regions, self.num_blocks)).to(self.device)
-        state_hist = torch.zeros(int(sim_len/self.step_size), self.num_regions, 2, self.num_blocks).to(self.device)
-        if(withOptVars):
-            opt_hist = torch.zeros(int(sim_len/self.step_size), self.num_regions, 4, self.num_blocks).to(self.device)
+            if((not useDelays) & (not useLaplacian)):
+                Network_S_E = torch.matmul(self.Con_Mtx, S_E)
-    ones_row = torch.ones(1, self.num_blocks, device = self.device)
+            if(useDelays & (not useLaplacian)):
+                # WARNING: This has not been tested
+                
+                speed = (1.5 + torch.nn.functional.relu(self.mu)) * (self.step_size * 0.001)
+                self.delays_idx = (self.Dist_Mtx / speed).type(torch.int64) #TODO: What is the units of the distance matrix then? Needs to be in meters?
-    # RWW and State Values
-    S_E = init_state[:, 0, :]
-    S_I = init_state[:, 1, :]
-    
-    for i in range(num_steps):
-        
-        if((not useDelays) & (not useLaplacian)):
-            Network_S_E = torch.matmul(self.Con_Mtx, S_E)
-        
-        if(useDelays & (not useLaplacian)):
-            # WARNING: This has not been tested
-            
-            speed = (1.5 + torch.nn.functional.relu(self.mu)) * (self.step_size * 0.001)
-            self.delays_idx = (self.Dist_Mtx / speed).type(torch.int64) #TODO: What is the units of the distance matrix then? Needs to be in meters?
-            
-            S_E_history_new = self.delayed_S_E # TODO: Check if this needs to be cloned to work
-            S_E_delayed_Mtx = S_E_history_new.gather(0, (self.buffer_idx - self.delays_idx)%self.buffer_len) # delayed E #TODO: Is distance matrix symmetric, should this be transposed?
-            
-            Delayed_S_E = torch.sum(torch.mul(self.Con_Mtx, S_E_delayed_Mtx), 1) # weights on delayed E
-            
-            Network_S_E = Delayed_S_E
-        
-        if(useLaplacian & (not useDelays)):
-            # WARNING: This has not been tested
-            
-            # NOTE: We are acutally using the NEGATIVE Laplacian
-            
-            Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1)) #Con_Mtx should be normalized, so this should just add a diagonal of -1's
-            S_E_laplacian = torch.matmul(self.Con_Mtx + Laplacian_diagonal, S_E)
-            
-            Network_S_E = S_E_laplacian
-        
-        if(useDelays & useLaplacian):
-            # WARNING: This has not been tested
+                S_E_history_new = self.delayed_S_E # TODO: Check if this needs to be cloned to work
+                S_E_delayed_Mtx = S_E_history_new.gather(0, (self.buffer_idx - self.delays_idx)%self.buffer_len) # delayed E #TODO: Is distance matrix symmetric, should this be transposed?
+                
+                Delayed_S_E = torch.sum(torch.mul(self.Con_Mtx, S_E_delayed_Mtx), 1) # weights on delayed E
+                
+                Network_S_E = Delayed_S_E
+            
+            if(useLaplacian & (not useDelays)):
+                # WARNING: This has not been tested
+                
+                # NOTE: We are acutally using the NEGATIVE Laplacian
+                
+                Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1)) #Con_Mtx should be normalized, so this should just add a diagonal of -1's
+                S_E_laplacian = torch.matmul(self.Con_Mtx + Laplacian_diagonal, S_E)
+                
+                Network_S_E = S_E_laplacian
+            
+            if(useDelays & useLaplacian):
+                # WARNING: This has not been tested
+                
+                # NOTE: We are acutally using the NEGATIVE Laplacian
+                
+                Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1)) #Con_Mtx should be normalized, so this should just add a diagonal of -1's
+                
+                speed = (1.5 + torch.nn.functional.relu(self.mu)) * (self.step_size * 0.001)
+                self.delays_idx = (self.Dist_Mtx / speed).type(torch.int64) #TODO: What is the units of the distance matrix then?
+                
+                S_E_history_new = self.delayed_S_E # TODO: Check if this needs to be cloned to work
+                S_E_delayed_Mtx = S_E_history_new.gather(0, (self.buffer_idx - self.delays_idx)%self.buffer_len)
+                
+                S_E_delayed_Vector = torch.sum(torch.mul(self.Con_Mtx, S_E_delayed_Mtx), 1) # weights on delayed E
+                
+                Delayed_Laplacian_S_E = (S_E_delayed_Vector + torch.matmul(Laplacian_diagonal, S_E))
+                
+                Network_S_E = Delayed_Laplacian_S_E
+            
+            # Currents
+            I_E = W_E*I_0 + w_plus*J_NMDA*S_E + G*J_NMDA*Network_S_E - torch.matmul(J,ones_row)*S_I + I_external # J * S_I updated to allow for local J fitting in parallel for FNGFPG
+            I_I = W_I*I_0 + J_NMDA*S_E - J_new*S_I + Lambda*G*J_NMDA*Network_S_E
-            
-            # NOTE: We are acutally using the NEGATIVE Laplacian
-            
-            Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1)) #Con_Mtx should be normalized, so this should just add a diagonal of -1's
-            
-            speed = (1.5 + torch.nn.functional.relu(self.mu)) * (self.step_size * 0.001)
-            self.delays_idx = (self.Dist_Mtx / speed).type(torch.int64) #TODO: What is the units of the distance matrix then?
+            # Firing Rates
+            # Orig
+            #r_E = (self.a_E*I_E - self.b_E) / (1 - torch.exp(-self.d_E*(self.a_E*I_E - self.b_E)))
+            #r_I = (self.a_I*I_I - self.b_I) / (1 - torch.exp(-self.d_I*(self.a_I*I_I - self.b_I)))
+            #
+            # EDIT5: Version to address torch.exp() returning nan and prevent gradient returning 0, and works with autograd
+            r_E = H_for_E_V3(I_E)
+            r_I = H_for_I_V3(I_I)
-            S_E_history_new = self.delayed_S_E # TODO: Check if this needs to be cloned to work
-            S_E_delayed_Mtx = S_E_history_new.gather(0, (self.buffer_idx - self.delays_idx)%self.buffer_len)
+            
+            # Average Synaptic Gating Variable
+            dS_E = - S_E/tau_E + (1 - S_E)*gamma*r_E #+ self.sig*v_of_T[i, :] Noise now added later
+            dS_I = - S_I/tau_I + gammaI*r_I #+ self.sig*v_of_T[i, :] Noise now added later
+            
+            # UPDATE VALUES
+            S_E = S_E + self.step_size*dS_E + sqrt(self.step_size)*sig*Ev[i, :, :]
+            S_I = S_I + self.step_size*dS_I + sqrt(self.step_size)*sig*Iv[i, :, :]
+            
+            # Bound the possible values of state variables (From fit.py code for numerical stability)
+            if(self.useBC):
+                S_E = torch.tanh(0.00001 + torch.nn.functional.relu(S_E - 0.00001))
+                S_I = torch.tanh(0.00001 + torch.nn.functional.relu(S_I - 0.00001))
-            S_E_delayed_Vector = torch.sum(torch.mul(self.Con_Mtx, S_E_delayed_Mtx), 1) # weights on delayed E
+            
+            state_hist[i, :, 0, :] = S_E
+            state_hist[i, :, 1, :] = S_I
+            
+            if useDelays:
+                self.delayed_S_E = self.delayed_S_E.clone(); self.delayed_S_E[self.buffer_idx, :] = S_E #TODO: This means that not back-propagating the network just the individual nodes
+                
+                if (self.buffer_idx == (self.buffer_len - 1)):
+                    self.buffer_idx = 0
+                else:
+                    self.buffer_idx = self.buffer_idx + 1
-            Delayed_Laplacian_S_E = (S_E_delayed_Vector + torch.matmul(Laplacian_diagonal, S_E))
+            
+            if(withOptVars):
+                opt_hist[i, :, 0, :] = I_I
+                opt_hist[i, :, 1, :] = I_E
+                opt_hist[i, :, 2, :] = r_I
+                opt_hist[i, :, 3, :] = r_E
-            
-            Network_S_E = Delayed_Laplacian_S_E
-        
-        # Currents
-        I_E = W_E*I_0 + w_plus*J_NMDA*S_E + G*J_NMDA*Network_S_E - torch.matmul(J,ones_row)*S_I + I_external # J * S_I updated to allow for local J fitting in parallel for FNGFPG
-        I_I = W_I*I_0 + J_NMDA*S_E - J_new*S_I + Lambda*G*J_NMDA*Network_S_E
-        
-        # Firing Rates
-        # Orig
-        #r_E = (self.a_E*I_E - self.b_E) / (1 - torch.exp(-self.d_E*(self.a_E*I_E - self.b_E)))
-        #r_I = (self.a_I*I_I - self.b_I) / (1 - torch.exp(-self.d_I*(self.a_I*I_I - self.b_I)))
-        #
-        # EDIT5: Version to address torch.exp() returning nan and prevent gradient returning 0, and works with autograd
-        r_E = H_for_E_V3(I_E)
-        r_I = H_for_I_V3(I_I)
+        state_vals = torch.cat((torch.unsqueeze(S_E, 1), torch.unsqueeze(S_I, 1)), 1)
-        
-        # Average Synaptic Gating Variable
-        dS_E = - S_E/tau_E + (1 - S_E)*gamma*r_E #+ self.sig*v_of_T[i, :] Noise now added later
-        dS_I = - S_I/tau_I + gammaI*r_I #+ self.sig*v_of_T[i, :] Noise now added later
-        
-        # UPDATE VALUES
-        S_E = S_E + self.step_size*dS_E + sqrt(self.step_size)*sig*Ev[i, :, :]
-        S_I = S_I + self.step_size*dS_I + sqrt(self.step_size)*sig*Iv[i, :, :]
-        
-        # Bound the possible values of state variables (From fit.py code for numerical stability)
-        if(self.useBC):
-            S_E = torch.tanh(0.00001 + torch.nn.functional.relu(S_E - 0.00001))
-            S_I = torch.tanh(0.00001 + torch.nn.functional.relu(S_I - 0.00001))
+        
+        # So that RAM does not accumulate in later batches/epochs
+        # & Because can't back pass through twice
+        self.delayed_S_E = self.delayed_S_E.detach()
+        
+        if(withOptVars):
+            layer_hist = torch.cat((state_hist, opt_hist), 2)
+        else:
+            layer_hist = state_hist
-        
-        state_hist[i, :, 0, :] = S_E
-        state_hist[i, :, 1, :] = S_I
+        
+        sim_vals = {}
+        sim_vals["NMM_state"] = state_vals
+        sim_vals["E"] = layer_hist[:,:,0,:].permute((1,0,2))
+        sim_vals["I"] = layer_hist[:,:,1,:].permute((1,0,2))
+        print('layer in NMM',layer_hist.shape)
+        return sim_vals, hE
-        
-        if useDelays:
-            self.delayed_S_E = self.delayed_S_E.clone(); self.delayed_S_E[self.buffer_idx, :] = S_E #TODO: This means that not back-propagating the network just the individual nodes
-            
-            if (self.buffer_idx == (self.buffer_len - 1)):
-                self.buffer_idx = 0
-            else:
-                self.buffer_idx = self.buffer_idx + 1
-        
-        if(withOptVars):
-            opt_hist[i, :, 0, :] = I_I
-            opt_hist[i, :, 1, :] = I_E
-            opt_hist[i, :, 2, :] = r_I
-            opt_hist[i, :, 3, :] = r_E
-    
-    state_vals = torch.cat((torch.unsqueeze(S_E, 1), torch.unsqueeze(S_I, 1)), 1)
-    
-    # So that RAM does not accumulate in later batches/epochs
-    # & Because can't back pass through twice
-    self.delayed_S_E = self.delayed_S_E.detach()
-    
-    if(withOptVars):
-        layer_hist = torch.cat((state_hist, opt_hist), 2)
-    else:
-        layer_hist = state_hist
-    
-    sim_vals = {}
-    sim_vals["NMM_state"] = state_vals
-    sim_vals["E"] = layer_hist[:,:,0,:].permute((1,0,2))
-    sim_vals["I"] = layer_hist[:,:,1,:].permute((1,0,2))
-    return sim_vals, hE

diff --git a/whobpyt/optimization/custom_cost_RWW.py b/whobpyt/optimization/custom_cost_RWW.py
index 19ff4ed3..6dfbdcb3 100644
--- a/whobpyt/optimization/custom_cost_RWW.py
+++ b/whobpyt/optimization/custom_cost_RWW.py
@@ -49,42 +49,7 @@ def loss(self, simData: dict, empData: torch.Tensor):
         v_window = state_vals['v']
         x_window = state_vals['x']
         q_window = state_vals['q']
-        if model.use_Gaussian_EI and model.use_Bifurcation:
-            loss_EI = torch.mean(model.E_v_inv * (E_window - model.E_m) ** 2) \
-                      + torch.mean(-torch.log(model.E_v_inv)) + \
-                      torch.mean(model.I_v_inv * (I_window - model.I_m) ** 2) \
-                      + torch.mean(-torch.log(model.I_v_inv)) + \
-                      torch.mean(model.q_v_inv * (q_window - model.q_m) ** 2) \
-                      + torch.mean(-torch.log(model.q_v_inv)) + \
-                      torch.mean(model.v_v_inv * (v_window - model.v_m) ** 2) \
-                      + torch.mean(-torch.log(model.v_v_inv)) \
-                      + 5.0 * (m(model.sup_ca) * m(model.g_IE) ** 2
-                               - m(model.sup_cb) * m(model.params.g_IE.value())
-                               + m(model.sup_cc) - m(model.params.g_EI.value())) ** 2
-        if model.use_Gaussian_EI and not model.use_Bifurcation:
-            loss_EI = torch.mean(model.E_v_inv * (E_window - model.E_m) ** 2) \
-                      + torch.mean(-torch.log(model.E_v_inv)) + \
-                      torch.mean(model.I_v_inv * (I_window - model.I_m) ** 2) \
-                      + torch.mean(-torch.log(model.I_v_inv)) + \
-                      torch.mean(model.q_v_inv * (q_window - model.q_m) ** 2) \
-                      + torch.mean(-torch.log(model.q_v_inv)) + \
-                      torch.mean(model.v_v_inv * (v_window - model.v_m) ** 2) \
-                      + torch.mean(-torch.log(model.v_v_inv))
-
-        if not model.use_Gaussian_EI and model.use_Bifurcation:
-            loss_EI = .1 * torch.mean(
-                torch.mean(E_window * torch.log(E_window) + (1 - E_window) * torch.log(1 - E_window) \
-                           + 0.5 * I_window * torch.log(I_window) + 0.5 * (1 - I_window) * torch.log(
-                    1 - I_window), dim=1)) + \
-                      + 5.0 * (m(model.sup_ca) * m(model.params.g_IE.value()) ** 2
-                               - m(model.sup_cb) * m(model.params.g_IE.value())
-                               + m(model.sup_cc) - m(model.params.g_EI.value())) ** 2
-
-        if not model.use_Gaussian_EI and not model.use_Bifurcation:
-            loss_EI = .1 * torch.mean(
-                torch.mean(E_window * torch.log(E_window) + (1 - E_window) * torch.log(1 - E_window) \
-                           + 0.5 * I_window * torch.log(I_window) + 0.5 * (1 - I_window) * torch.log(
-                    1 - I_window), dim=1))
+

         loss_prior = []
@@ -93,19 +58,10 @@ def loss(self, simData: dict, empData: torch.Tensor):
         for var_name in variables_p:
             # print(var)
             var = getattr(model.params, var_name)
-            if model.use_Bifurcation:
-                if var.has_prior and var_name not in ['std_in', 'g_EI', 'g_IE'] and \
-                        var_name not in exclude_param:
-                    loss_prior.append(torch.sum((lb + m(var.prior_var)) * \
-                                                (m(var.val) - m(var.prior_mean)) ** 2) \
-                                      + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation
-            else:
-                if var.has_prior and var_name not in ['std_in'] and \
-                        var_name not in exclude_param:
-                    loss_prior.append(torch.sum((lb + m(var.prior_var)) * \
-                                                (m(var.val) - m(var.prior_mean)) ** 2) \
-                                      + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation
+            if var.has_prior and var_name not in ['std_in'] and var_name not in exclude_param:
+                loss_prior.append(torch.sum((lb + m(var.prior_var)) * (m(var.val) - m(var.prior_mean)) ** 2) \
+                                  + torch.sum(-torch.log(lb + m(var.prior_var)))) #TODO: Double check about converting _v_inv to just variance representation

         # total loss
-        loss = w_cost * loss_main + sum(loss_prior) + 1 * loss_EI
+        loss = w_cost * loss_main + sum(loss_prior)

         return loss
diff --git a/whobpyt/run/customfitting.py b/whobpyt/run/customfitting.py
index 77e8164b..d9b40fd9 100644
--- a/whobpyt/run/customfitting.py
+++ b/whobpyt/run/customfitting.py
@@ -91,7 +91,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 # initials of history of E
                 delayHist = self.model.createDelayIC(ver = 0) #TODO: Delays are currently is not implemented in various places
             else:
-                firstIC = self.model.next_start_state
+                firstIC = self.model.NMM.next_start_state
                 delayHist = torch.tensor(1.0, device = self.device) # TODO: Delays are currently is not implemented in various places

             # initial the external inputs
@@ -100,7 +100,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
             num_blocks = int(self.model.sim_len/block_len)

             ## Noise for the epoch (which has 1 batch)
-            [serialNoise, blockNoise] = self.model.genNoise(block_len)
+            [serialNoise, blockNoise] = self.model.NMM.genNoise(block_len)

             with torch.no_grad():
                 sim_vals, delayHist = self.model.forward(external, firstIC, delayHist, serialNoise)
@@ -127,9 +127,9 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
                 nextICs = ICs[:,:,[-1]] #Brackets are to keep the dimension

                 self.model.setBlocks(num_blocks)
-                self.model.next_start_state = newICs
+                self.model.NMM.next_start_state = newICs
                 sim_vals, delayHist = self.model.forward(external, newICs, delayHist, blockNoise)
-                self.model.next_start_state = nextICs #TODO: Update to deal with delays as well
+                self.model.NMM.next_start_state = nextICs #TODO: Update to deal with delays as well

             print("Blocked Finished")
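Note (not part of the patch): a minimal usage sketch of the refactored parameter handling in RWWEI2.py. It assumes an already-constructed RWWEI2 instance named `model` and a caller-defined `num_epochs`; the learning rates and eps value are illustrative only. The new setModelParameters() collects fitted `par` values into params_fitted['modelparameter'] and their prior means/variances into params_fitted['hyperparameter'], so a fitting loop can drive the two groups with separate optimizers:

    import torch

    # Registers the fitted par values as torch Parameters and fills model.params_fitted
    model.setModelParameters()

    # One optimizer per parameter group; hyperparameters (prior mean/variance) are updated separately
    modelparameter_optimizer = torch.optim.Adam(model.params_fitted['modelparameter'], lr=0.05, eps=1e-7)
    hyperparameter_optimizer = torch.optim.Adam(model.params_fitted['hyperparameter'], lr=0.05, eps=1e-7)

    for epoch in range(num_epochs):  # num_epochs assumed to be defined by the caller
        modelparameter_optimizer.zero_grad()
        hyperparameter_optimizer.zero_grad()
        # ... run model.forward(...), build the cost as in custom_cost_RWW.py, call loss.backward() ...
        modelparameter_optimizer.step()
        hyperparameter_optimizer.step()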