moving common methods from models to abstract #173

Merged · 9 commits · Feb 3, 2024
Changes from 8 commits
5 changes: 4 additions & 1 deletion examples/eg002r__multimodal_simulation.py
@@ -20,7 +20,10 @@
# Importage
# ---------------------------------------------------
#

# os stuff
import os
import sys
sys.path.append('..')
# whobpyt stuff
import whobpyt
from whobpyt.datatypes import par, Recording
2 changes: 1 addition & 1 deletion examples/eg003r__fitting_rww_example.py
@@ -110,7 +110,7 @@

# %%
# call model want to fit
model = RNNRWW(node_size, TPperWindow, step_size, repeat_size, tr, sc, True, params)
model = RNNRWW(params, node_size =node_size, TRs_per_window =TPperWindow, step_size=step_size, tr=tr, sc=sc, use_fit_gains=True)

# %%
# create objective function
2 changes: 1 addition & 1 deletion examples/eg004r__fitting_JR_example.py
@@ -108,7 +108,7 @@

# %%
# call model want to fit
model = RNNJANSEN(node_size, TPperWindow, step_size, output_size, tr, sc, lm, dist, True, False, params)
model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm = False)

# %%
# create objective function
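For reference, the constructor refactor applied across these example scripts, shown side by side (a sketch, not standalone code: the variables are assumed to be defined earlier in the script, as in eg004 above):

```python
# Old call (positional arguments, params last), removed by this PR:
# model = RNNJANSEN(node_size, TPperWindow, step_size, output_size, tr, sc, lm, dist, True, False, params)

# New call (params object first, remaining arguments passed by keyword):
model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size,
                  output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist,
                  use_fit_gains=True, use_fit_lfm=False)
```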
5 changes: 4 additions & 1 deletion examples/eg005r__gpu_support.py
@@ -16,7 +16,10 @@
# Importage
# ---------------------------------------------------
#

# os stuff
import os
import sys
sys.path.append('..')
# whobpyt stuff
import whobpyt
from whobpyt.datatypes import par, Recording
6 changes: 4 additions & 2 deletions examples/eg006r__replicate_Momi2023.py
@@ -104,7 +104,7 @@
output_size = eeg_data.shape[0]
batch_size = 20
step_size = 0.0001
num_epoches = 120
num_epoches = 20
tr = 0.001
state_size = 6
base_batch_num = 20
@@ -132,7 +132,9 @@

# %%
# call model want to fit
model = RNNJANSEN(node_size, batch_size, step_size, output_size, tr, sc, lm, dist, True, False, params)
# call model want to fit
model = RNNJANSEN(params, node_size=node_size, TRs_per_window=batch_size, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm = False)



# create objective function
35 changes: 32 additions & 3 deletions whobpyt/datatypes/AbstractNMM.py
@@ -1,11 +1,13 @@
import torch

from whobpyt.datatypes.parameter import par
from torch.nn.parameter import Parameter
class AbstractNMM(torch.nn.Module):
# This is the abstract class for the models (typically neural mass models) that are being trained.
# The neuroimaging modality might be integrated into the model as well.

def __init__(self):
def __init__(self, params):
super(AbstractNMM, self).__init__() # May not want to enherit from torch.nn.Module in the future
self.params = params

self.state_names = ["None"] # The names of the state variables of the model
self.output_names = ["None"] # The variable to be used as output from the NMM, for purposes such as the input to an objective function
@@ -25,7 +27,34 @@ def setModelParameters(self):
# Setting the parameters that will be optimized as either model parameters or 2ndLevel/hyper
# parameters (for various optional features).
# This should be called in the __init__() function implementation for convenience if possible.
pass
# If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information)
param_reg = []
param_hyper = []

var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
for var_name in var_names:
var = getattr(self.params, var_name)
if (var.fit_hyper):
if var_name == 'lm':
size = var.prior_var.shape
var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance
param_hyper.append(var.prior_mean)
param_hyper.append(var.prior_var)
elif (var != 'std_in'):
var.randSet() #TODO: This should be done before giving params to model class
param_hyper.append(var.prior_mean)
param_hyper.append(var.prior_var)

if (var.fit_par):
param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first

if (var.fit_par | var.fit_hyper):
self.track_params.append(var_name) #NMM Parameters

if var_name == 'lm':
setattr(self, var_name, var.val)

self.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper}

def createIC(self, ver):
# Create the initial conditions for the model state variables.
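To illustrate how a concrete model is expected to use the relocated logic, here is a minimal sketch (not code from this PR; `MyParams` and `MyModel` are hypothetical names, and it assumes `AbstractNMM` and `par` are importable from `whobpyt.datatypes` as in the examples above, with `par` taking a single default value in its usual form):

```python
from whobpyt.datatypes import AbstractNMM, par

class MyParams:
    # Hypothetical parameter container: every attribute of type `par` is picked up
    # by the shared setModelParameters() now defined in AbstractNMM.
    def __init__(self):
        self.g = par(500)  # a single fittable coupling value, for illustration

class MyModel(AbstractNMM):
    def __init__(self, params, node_size=68):
        super(MyModel, self).__init__(params)  # the base class now stores self.params
        self.node_size = node_size
        self.state_names = ['E']
        self.output_names = ['bold']
        self.track_params = []        # populated by setModelParameters()
        self.setModelParameters()     # shared: collects par-typed attributes into params_fitted
```

Model-specific tensors that are not `par` objects (for example the structural-connectivity gain matrices in RNNJANSEN below) are set up separately, here via the new setModelSCParameters().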
5 changes: 2 additions & 3 deletions whobpyt/models/BOLD/BOLD.py
@@ -1,7 +1,7 @@
import torch
from whobpyt.datatypes import AbstractMode

class BOLD_Layer(AbstractMode):
class BOLD_Layer(torch.nn.Module):
'''
Balloon-Windkessel Hemodynamic Response Function Forward Model

@@ -137,5 +137,4 @@ def forward(self, init_state, step_size, sim_len, node_history):
sim_vals["q"] = layer_hist[:, :, 3, :].permute((1,0,2))
sim_vals["bold"] = layer_hist[:, :, 4, :].permute((1,0,2)) # Post Permute: Nodes x Time x Batch

return sim_vals, hE

return sim_vals, hE
2 changes: 1 addition & 1 deletion whobpyt/models/EEG/EEG.py
@@ -1,7 +1,7 @@
import torch
from whobpyt.datatypes import AbstractMode

class EEG_Layer(AbstractMode):
class EEG_Layer(torch.nn.Module):
'''

Lead Field Matrix multiplication which converts Source Space EEG to Channel Space EEG
9 changes: 9 additions & 0 deletions whobpyt/models/EEG/ParamsEEG.py
@@ -9,4 +9,13 @@ def __init__(self, Lead_Field):
#############################################

self.LF = Lead_Field # This should be [num_regions, num_channels]

def to(self, device):
# Moves all parameters between CPU and GPU

vars_names = [a for a in dir(self) if not a.startswith('__')]
for var_name in vars_names:
var = getattr(self, var_name)
if (type(var) == par):
var.to(device)
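
A quick usage sketch for the new `to()` helper (the class name and import path are assumed from the file location; the illustrative lead-field shape follows the example defaults above): it moves every `par`-typed attribute of the params object to the target device, while plain arrays such as the raw `LF` stay where they are unless wrapped in `par`.

```python
import numpy as np
import torch
from whobpyt.models.EEG.ParamsEEG import ParamsEEG  # import path assumed

lead_field = np.random.randn(64, 200)    # illustrative lead-field array
eeg_params = ParamsEEG(lead_field)

if torch.cuda.is_available():
    eeg_params.to(torch.device("cuda"))  # relocates any par-typed attributes to the GPU
```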

73 changes: 17 additions & 56 deletions whobpyt/models/JansenRit/jansen_rit.py
@@ -87,9 +87,10 @@ class RNNJANSEN(AbstractNMM):

"""

def __init__(self, node_size: int,
TRs_per_window: int, step_size: float, output_size: int, tr: float, sc: np.ndarray, lm: np.ndarray, dist: np.ndarray,
use_fit_gains: bool, use_fit_lfm: bool, params: ParamsJR) -> None:
def __init__(self, params: ParamsJR, node_size=200,
TRs_per_window= 20, step_size=0.0001, output_size=64, tr=0.001, sc=np.ones((200,200)), \
lm=np.ones((64,200)), dist=np.ones((200,200)),
use_fit_gains=True, use_fit_lfm=False):
"""
Parameters
----------
@@ -118,7 +119,7 @@ def __init__(self, node_size: int,
"""
method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types

super(RNNJANSEN, self).__init__()
super(RNNJANSEN, self).__init__(params)
self.state_names = ['E', 'Ev', 'I', 'Iv', 'P', 'Pv']
self.output_names = ["eeg"]
self.track_params = [] #Is populated during setModelParameters()
Expand All @@ -140,18 +141,9 @@ def __init__(self, node_size: int,
self.output_size = lm.shape[0] # number of EEG channels

self.setModelParameters()
self.setModelSCParameters()

def info(self):
# TODO: Make sure this method is useful
"""
Returns a dictionary with the names of the states and the output.

Returns
-------
Dict[str, List[str]]
"""

return {"state_names": ['E', 'Ev', 'I', 'Iv', 'P', 'Pv'], "output_names": ["eeg"]}


def createIC(self, ver):
"""
@@ -195,14 +187,11 @@ def createDelayIC(self, ver):

return torch.tensor(np.random.uniform(state_lb, state_ub, (self.node_size, delays_max)), dtype=torch.float32)

def setModelParameters(self):
def setModelSCParameters(self):
"""
Sets the parameters of the model.
"""

param_reg = []
param_hyper = []


# Set w_bb, w_ff, and w_ll as attributes as type Parameter if use_fit_gains is True
if self.use_fit_gains:
self.w_bb = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the backwards gains
@@ -211,45 +200,15 @@ def setModelParameters(self):
dtype=torch.float32))
self.w_ll = Parameter(torch.tensor(np.zeros((self.node_size, self.node_size)) + 0.05, # the lateral gains
dtype=torch.float32))
param_reg.append(self.w_ll)
param_reg.append(self.w_ff)
param_reg.append(self.w_bb)
self.params_fitted['modelparameter'].append(self.w_ll)
self.params_fitted['modelparameter'].append(self.w_ff)
self.params_fitted['modelparameter'].append(self.w_bb)
else:
self.w_bb = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
self.w_ff = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)
self.w_ll = torch.tensor(np.zeros((self.node_size, self.node_size)), dtype=torch.float32)

# If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information)
if self.use_fit_lfm:
self.lm = Parameter(torch.tensor(self.lm, dtype=torch.float32)) # leadfield matrix from sourced data to eeg
param_reg.append(self.lm)
else:
self.lm = torch.tensor(self.lm, dtype=torch.float32)

var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
for var_name in var_names:
var = getattr(self.params, var_name)
if (var.fit_hyper):
if var_name == 'lm':
size = var.prior_var.shape
var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance
param_hyper.append(var.prior_mean)
param_hyper.append(var.prior_var)
elif (var != 'std_in'):
var.randSet() #TODO: This should be done before giving params to model class
param_hyper.append(var.prior_mean)
param_hyper.append(var.prior_var)

if (var.fit_par):
param_reg.append(var.val) #TODO: This should got before fit_hyper, but need to change where randomness gets added in the code first

if (var.fit_par | var.fit_hyper):
self.track_params.append(var_name) #NMM Parameters

if var_name == 'lm':
setattr(self, var_name, var.val)

self.params_fitted = {'modelparameter': param_reg,'hyperparameter': param_hyper}



def forward(self, external, hx, hE):
Expand Down Expand Up @@ -313,6 +272,8 @@ def forward(self, external, hx, hE):

g_f = (lb * con_1 + m(self.params.g_f.value()))
g_b = (lb * con_1 + m(self.params.g_b.value()))

lm = self.params.lm.value()


next_state = {}
@@ -424,9 +385,9 @@ def forward(self, external, hx, hE):
hE = torch.cat([M, hE[:, :-1]], dim=1) # update placeholders for pyramidal buffer

# Capture the states at every tr in the placeholders which is then used in the cost calculation.
lm_t = (self.lm.T / torch.sqrt(self.lm ** 2).sum(1)).T
lm_t = (lm.T / torch.sqrt(lm ** 2).sum(1)).T
self.lm_t = (lm_t - 1 / self.output_size * torch.matmul(torch.ones((1, self.output_size)), lm_t))
temp = cy0 * torch.matmul(self.lm_t, M[:200, :]) - 1 * y0
temp = cy0 * torch.matmul(self.lm_t, E-I) - 1 * y0
eeg_window.append(temp)

# Update the current state.
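For clarity, a standalone sketch of the EEG projection used in the updated forward() (illustrative shapes and constants, not PR code): each channel's lead-field row is scaled by its summed absolute weight, an average reference is subtracted across channels, and the pyramidal proxy E - I is projected to channel space.

```python
import torch

channels, regions, batch = 64, 200, 20        # illustrative sizes
lm = torch.randn(channels, regions)           # lead-field matrix
E = torch.randn(regions, batch)               # excitatory population state
I = torch.randn(regions, batch)               # inhibitory population state
cy0, y0 = 5.0, 2.0                            # example scaling and offset constants

lm_t = (lm.T / torch.sqrt(lm ** 2).sum(1)).T               # scale each channel row by its summed |weight|
lm_t = lm_t - torch.ones((1, channels)) @ lm_t / channels  # subtract the channel mean (average reference)
eeg = cy0 * lm_t @ (E - I) - y0                             # project source activity to channel space
```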