Merge pull request #173 from JohnWangDataAnalyst/dev
moving common methods from models to abstract
JohnGriffiths authored Feb 3, 2024
2 parents 53e7bd0 + 9b0d1f0 commit fe40096
Showing 18 changed files with 421 additions and 493 deletions.
5 changes: 4 additions & 1 deletion examples/eg002r__multimodal_simulation.py
@@ -20,7 +20,10 @@
# Importage
# ---------------------------------------------------
#

+# os stuff
+import os
+import sys
+sys.path.append('..')
# whobpyt stuff
import whobpyt
from whobpyt.datatypes import par, Recording
2 changes: 1 addition & 1 deletion examples/eg003r__fitting_rww_example.py
@@ -110,7 +110,7 @@

# %%
# call model we want to fit
-model = RNNRWW(node_size, TPperWindow, step_size, repeat_size, tr, sc, True, params)
+model = RNNRWW(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, tr=tr, sc=sc, use_fit_gains=True)

# %%
# create objective function
2 changes: 1 addition & 1 deletion examples/eg004r__fitting_JR_example.py
@@ -108,7 +108,7 @@

# %%
# call model we want to fit
-model = RNNJANSEN(node_size, TPperWindow, step_size, output_size, tr, sc, lm, dist, True, False, params)
+model = RNNJANSEN(params, node_size=node_size, TRs_per_window=TPperWindow, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)

# %%
# create objective function
5 changes: 4 additions & 1 deletion examples/eg005r__gpu_support.py
@@ -16,7 +16,10 @@
# Importage
# ---------------------------------------------------
#

+# os stuff
+import os
+import sys
+sys.path.append('..')
# whobpyt stuff
import whobpyt
from whobpyt.datatypes import par, Recording
6 changes: 4 additions & 2 deletions examples/eg006r__replicate_Momi2023.py
@@ -104,7 +104,7 @@
output_size = eeg_data.shape[0]
batch_size = 20
step_size = 0.0001
-num_epoches = 120
+num_epoches = 20
tr = 0.001
state_size = 6
base_batch_num = 20
@@ -132,7 +132,9 @@

# %%
# call model we want to fit
-model = RNNJANSEN(node_size, batch_size, step_size, output_size, tr, sc, lm, dist, True, False, params)
+# call model we want to fit
+model = RNNJANSEN(params, node_size=node_size, TRs_per_window=batch_size, step_size=step_size, output_size=output_size, tr=tr, sc=sc, lm=lm, dist=dist, use_fit_gains=True, use_fit_lfm=False)



# create objective function
35 changes: 32 additions & 3 deletions whobpyt/datatypes/AbstractNMM.py
@@ -1,11 +1,13 @@
import torch

from whobpyt.datatypes.parameter import par
+from torch.nn.parameter import Parameter
class AbstractNMM(torch.nn.Module):
    # This is the abstract class for the models (typically neural mass models) that are being trained.
    # The neuroimaging modality might be integrated into the model as well.

-    def __init__(self):
+    def __init__(self, params):
        super(AbstractNMM, self).__init__() # May not want to inherit from torch.nn.Module in the future
+        self.params = params

        self.state_names = ["None"] # The names of the state variables of the model
        self.output_names = ["None"] # The variable to be used as output from the NMM, for purposes such as the input to an objective function
@@ -25,7 +27,34 @@ def setModelParameters(self):
        # Setting the parameters that will be optimized as either model parameters or 2ndLevel/hyper
        # parameters (for various optional features).
        # This should be called in the __init__() function implementation for convenience if possible.
-        pass
+        # If use_fit_lfm is True, set lm as an attribute as type Parameter (containing variance information)
+        param_reg = []
+        param_hyper = []
+
+        var_names = [a for a in dir(self.params) if (type(getattr(self.params, a)) == par)]
+        for var_name in var_names:
+            var = getattr(self.params, var_name)
+            if (var.fit_hyper):
+                if var_name == 'lm':
+                    size = var.prior_var.shape
+                    var.val = Parameter(var.val.detach() - 1 * torch.ones((size[0], size[1]))) # TODO: This is not consistent with what user would expect giving a variance
+                    param_hyper.append(var.prior_mean)
+                    param_hyper.append(var.prior_var)
+                elif (var_name != 'std_in'): # compare the name, not the par object ('var' is a par instance and never equals a string)
+                    var.randSet() # TODO: This should be done before giving params to model class
+                    param_hyper.append(var.prior_mean)
+                    param_hyper.append(var.prior_var)
+
+            if (var.fit_par):
+                param_reg.append(var.val) # TODO: This should go before fit_hyper, but need to change where randomness gets added in the code first
+
+            if (var.fit_par | var.fit_hyper):
+                self.track_params.append(var_name) # NMM parameters
+
+            if var_name == 'lm':
+                setattr(self, var_name, var.val)
+
+        self.params_fitted = {'modelparameter': param_reg, 'hyperparameter': param_hyper}

    def createIC(self, ver):
        # Create the initial conditions for the model state variables.
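To make the refactor concrete, here is a minimal sketch of how a model class now hands its parameter object to this shared base-class machinery. ToyParams and ToyNMM are illustrative names, not part of this PR, and the sketch assumes AbstractParams exposes self.params and setParamsAsattr() in the way ParamsHGF (further below) relies on:

from whobpyt.datatypes import AbstractNMM, AbstractParams, par

class ToyParams(AbstractParams):  # hypothetical example class
    def __init__(self, **kwargs):
        super(ToyParams, self).__init__(**kwargs)
        defaults = {"a": par(0.5)}  # one illustrative parameter
        for name in defaults:
            if name not in self.params:
                self.params[name] = defaults[name]
        self.setParamsAsattr()

class ToyNMM(AbstractNMM):  # hypothetical example class
    def __init__(self, params):
        super(ToyNMM, self).__init__(params)  # the base class now stores self.params
        self.setModelParameters()  # shared method collects par attributes into self.params_fitted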
5 changes: 2 additions & 3 deletions whobpyt/models/BOLD/BOLD.py
@@ -1,7 +1,7 @@
import torch
from whobpyt.datatypes import AbstractMode

-class BOLD_Layer(AbstractMode):
+class BOLD_Layer(torch.nn.Module):
    '''
    Balloon-Windkessel Hemodynamic Response Function Forward Model
@@ -137,5 +137,4 @@ def forward(self, init_state, step_size, sim_len, node_history):
sim_vals["q"] = layer_hist[:, :, 3, :].permute((1,0,2))
sim_vals["bold"] = layer_hist[:, :, 4, :].permute((1,0,2)) # Post Permute: Nodes x Time x Batch

return sim_vals, hE

return sim_vals, hE
2 changes: 1 addition & 1 deletion whobpyt/models/EEG/EEG.py
@@ -1,7 +1,7 @@
import torch
from whobpyt.datatypes import AbstractMode

-class EEG_Layer(AbstractMode):
+class EEG_Layer(torch.nn.Module):
    '''
    Lead Field Matrix multiplication which converts Source Space EEG to Channel Space EEG
9 changes: 9 additions & 0 deletions whobpyt/models/EEG/ParamsEEG.py
@@ -9,4 +9,13 @@ def __init__(self, Lead_Field):
        #############################################

        self.LF = Lead_Field # This should be [num_regions, num_channels]
+
+    def to(self, device):
+        # Moves all par-type parameters between CPU and GPU
+
+        vars_names = [a for a in dir(self) if not a.startswith('__')]
+        for var_name in vars_names:
+            var = getattr(self, var_name)
+            if (type(var) == par):
+                var.to(device)
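A quick usage sketch for the new to() hook (lead_field_matrix is a hypothetical variable, and the import path is assumed from this file's location; the loop above only moves par-wrapped attributes):

import torch
from whobpyt.models.EEG.ParamsEEG import ParamsEEG  # assumed import path

params_eeg = ParamsEEG(lead_field_matrix)  # lead_field_matrix: [num_regions, num_channels]
if torch.cuda.is_available():
    params_eeg.to(torch.device('cuda'))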

27 changes: 27 additions & 0 deletions whobpyt/models/HGF/ParamsHGF.py
@@ -0,0 +1,27 @@
import torch
from whobpyt.datatypes import AbstractParams, par

class ParamsHGF(AbstractParams):
    def __init__(self, **kwargs):

        super(ParamsHGF, self).__init__(**kwargs)

        params = {
            "omega_3": par(0.03), # standard deviation of the Gaussian noise on x3
            "omega_2": par(0.02), # standard deviation of the Gaussian noise on x2
            "kappa": par(1.),     # scales how strongly x3 modulates the noise on x2
            "x2mean": par(1),     # baseline subtracted from x2 to form the output x1
            "deca2": par(1),      # decay rate of x2
            "deca3": par(1),      # decay rate of x3
            "g_x2_x3": par(1),    # gain of x2 onto x3
            "g_x3_x2": par(1),    # gain of x3 onto x2
            "c": par(1)           # sigmoid gain for the (currently commented-out) output nonlinearity
        }

        for var in params:
            if var not in self.params:
                self.params[var] = params[var]

        self.setParamsAsattr()
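One usage note, under the assumption (suggested by the `if var not in self.params` guard above) that AbstractParams.__init__ stores keyword arguments into self.params: defaults can be overridden at construction time.

custom_params = ParamsHGF(omega_2=par(0.05))  # would override the default of 0.02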
2 changes: 2 additions & 0 deletions whobpyt/models/HGF/__init__.py
@@ -0,0 +1,2 @@
from .hgf import HGF
from .ParamsHGF import ParamsHGF
113 changes: 113 additions & 0 deletions whobpyt/models/HGF/hgf.py
@@ -0,0 +1,113 @@
import numpy as np  # needed for np.array / np.newaxis / np.random below (missing from the original imports)
import torch
from torch.nn.parameter import Parameter
from whobpyt.datatypes import AbstractNMM, AbstractParams, par
from whobpyt.models.HGF.ParamsHGF import ParamsHGF  # import the module directly to avoid a circular import through the package __init__

class HGF(AbstractNMM):
    def __init__(self, paramsHGF, TRperWindow=20, node_size=1, tr=1, step_size=.05, use_fit_gains=False, output_size=1) -> None:
        super(HGF, self).__init__(paramsHGF)

        self.model_name = 'HGF'
        self.output_names = ['x1']
        self.state_names = np.array(['x2', 'x3'])
        self.pop_size = 1 # one population per node (vs. 3 in JR)
        self.state_size = 2 # 2 states in each population
        self.tr = tr # sampling interval (TR), in the same time unit as step_size
        self.step_size = torch.tensor(step_size, dtype=torch.float32) # integration step size
        self.steps_per_TR = int(tr / step_size)
        self.TRs_per_window = TRperWindow # size of the batch used at each step
        self.node_size = node_size # num of ROI
        self.output_size = output_size # num of EEG channels
        self.use_fit_gains = use_fit_gains
        #self.params = paramsHGF
        self.setModelParameters()
        self.setHyperParameters()


    def forward(self, external=None, X=None, hE=None):
        omega3 = self.params.omega_3.value()
        omega2 = self.params.omega_2.value()
        kappa = self.params.kappa.value()
        x2mean = self.params.x2mean.value()
        deca2 = self.params.deca2.value()
        deca3 = self.params.deca3.value()
        c = self.params.c.value()
        g_x2_x3 = self.params.g_x2_x3.value()
        g_x3_x2 = self.params.g_x3_x2.value()
        dt = self.step_size
        next_state = {}

        state_windows = []
        x1_windows = []
        x2 = X[:, 0:1]
        x3 = X[:, 1:2]
        for TR_i in range(self.TRs_per_window):
            x2_tmp = []
            x3_tmp = []
            # Since tr is on the order of seconds, a small step size like 0.05 is needed to integrate the model states.
            for step_i in range(self.steps_per_TR):
                # Euler-Maruyama step of the coupled SDEs; the noise on x2 is modulated by exp(kappa*x3)
                x3_new = x3 - dt*(deca3*x3 - g_x2_x3*x2) + torch.sqrt(dt)*torch.randn(self.node_size, 1)*omega3
                x2_new = x2 - dt*(deca2*x2 - g_x3_x2*x3) + torch.sqrt(dt)*torch.randn(self.node_size, 1)*omega2*torch.exp(kappa*x3)

                x2_tmp.append(x2_new)
                x3_tmp.append(x3_new)
                # soft-clip the states to [-10, 10] for numerical stability
                x2 = 10*torch.tanh(x2_new/10)
                x3 = 10*torch.tanh(x3_new/10)
                #x2 = sum(x2_tmp)/self.steps_per_TR #10*torch.tanh(x2_new/10)
                #x3 = sum(x3_tmp)/self.steps_per_TR #10*torch.tanh(x3_new/10)
            state_windows.append(torch.cat([x2, x3], dim=1)[:, :, np.newaxis])
            x1_windows.append(x2 - x2mean)
            #x1_windows.append(1/(1+torch.exp(-c*(x2-k))))
        next_state['x1'] = torch.cat(x1_windows, dim=1)
        next_state['states'] = torch.cat(state_windows, dim=2)
        next_state['current_state'] = torch.cat([x2, x3], dim=1)
        return next_state, hE


    def createIC(self, ver):
        """
        A function to return an initial state tensor for the model.

        Parameters
        ----------
        ver: int
            Ignored parameter

        Returns
        ----------
        Tensor
            Initial conditions for the HGF states (x2, x3); the 0 * uniform term makes them deterministic
        """

        # initial state
        return torch.tensor(0 * np.random.uniform(-0.02, 0.02, (self.node_size, self.state_size)) + np.array([0.0, 0.5]), dtype=torch.float32)


    def setHyperParameters(self):
        """
        Sets the hyperparameters of the model (variational means and inverse variances for the states).
        """

        self.mu2 = Parameter(torch.tensor(0.0*np.ones((self.node_size, 1)), # mean for x2
                                          dtype=torch.float32))
        self.mu3 = Parameter(torch.tensor(1.0*np.ones((self.node_size, 1)), # mean for x3
                                          dtype=torch.float32))
        self.var_inv_2 = Parameter(torch.tensor(.1*np.ones((self.node_size, 1)), # inverse variance for x2
                                                dtype=torch.float32))
        self.var_inv_3 = Parameter(torch.tensor(.1*np.ones((self.node_size, 1)), # inverse variance for x3
                                                dtype=torch.float32))
        self.params_fitted['hyperparameter'].append(self.mu2)
        self.params_fitted['hyperparameter'].append(self.mu3)
        self.params_fitted['hyperparameter'].append(self.var_inv_2)
        self.params_fitted['hyperparameter'].append(self.var_inv_3)
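A minimal smoke-test sketch for the new model (the file view above is truncated at 113 added lines, so this assumes no later code changes the constructor or forward() behavior shown):

from whobpyt.models.HGF import HGF, ParamsHGF

params = ParamsHGF()
model = HGF(params, TRperWindow=20, node_size=1, tr=1, step_size=0.05)
X0 = model.createIC(ver=0)                     # deterministic ICs: x2 = 0.0, x3 = 0.5
next_state, hE = model.forward(X=X0, hE=None)
print(next_state['x1'].shape)                  # expected: (node_size, TRs_per_window)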