Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion whobpyt/run/batchfitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,16 @@ class Fitting_Batch(AbstractFitting):
"""

def __init__(self, model, cost, device = torch.device('cpu')):
"""
Parameters
----------
model : AbstractNMM
    Whole Brain Model to Simulate
cost : AbstractLoss
    A particular objective function which the model will be optimized for.
device : torch.device
    The device (CPU or GPU) on which the fitting is to run
"""

self.model = model
self.cost = cost
Expand Down Expand Up @@ -142,4 +152,4 @@ def simulate(self, stim, numTP):
Not implemented yet.
'''
pass

12 changes: 11 additions & 1 deletion whobpyt/run/customfitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,16 @@ class Fitting_FNGFPG(AbstractFitting):
"""

def __init__(self, model, cost, device = torch.device('cpu')):
"""
Parameters
----------
model : AbstractNMM
    Whole Brain Model to Simulate
cost : AbstractLoss
    A particular objective function which the model will be optimized for.
device : torch.device
    The device (CPU or GPU) on which the fitting is to run
"""

self.model = model
self.cost = cost
Expand Down Expand Up @@ -173,4 +183,4 @@ def simulate(self, stim, numTP):
'''
Not implemented yet.
'''
pass
pass
14 changes: 8 additions & 6 deletions whobpyt/run/modelfitting.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
Authors: Zheng Wang, John Griffiths, Andrew Clappison, Hussain Ather, Kevin Kadak
Authors: Zheng Wang, John Griffiths, Andrew Clappison, Hussain Ather, Kevin Kadak, Parsa Oveisi
Neural Mass Model fitting module for model fitting using pytorch
"""

Expand Down Expand Up @@ -86,7 +86,9 @@ def train(self, u, empRecs: list,
num_epochs: int
the number of times to go through the entire training data set
TPperWindow: int
Number of Empirical Time Points per window. model.forward does one window at a time.
Number of Empirical Time Points per window. model.forward does one window at a time.
warmupWindow: int
Number of Time Points to use as warmup (not used for training).
learningrate: float
rate of gradient descent
lr_2ndLevel: float
Expand Down Expand Up @@ -217,14 +219,14 @@ def train(self, u, empRecs: list,
hE = hE_new.detach().clone() #dtype=torch.float32

ts_emp = np.concatenate(list(windowedTS),1) #TODO: Check this code
fc = np.corrcoef(ts_emp)
fc = np.corrcoef(ts_emp) # calculating the Pearson correlation as a measure of functional connectivity

# TIME SERIES: Concatenate all windows together to get one recording
for name in set(self.model.state_names + self.model.output_names):
windListDict[name] = np.concatenate(windListDict[name], axis=1)

ts_sim = windListDict[self.model.output_names[0]]
fc_sim = np.corrcoef(ts_sim[:, 10:])
fc_sim = np.corrcoef(ts_sim[:, 10:]) # disregarding the first few timepoints as the model stabilizes

print('epoch: ', i_epoch,
'loss:', loss_main.detach().cpu().numpy(),
Expand Down Expand Up @@ -407,9 +409,9 @@ def simulate(self, u, numTP: int, base_window_num: int = 0, transient_num: int =
windListDict[name] = np.concatenate(windListDict[name], axis=1)

ts_sim = windListDict[self.model.output_names[0]]
fc_sim = np.corrcoef(ts_sim[:, transient_num:])
fc_sim = np.corrcoef(ts_sim[:, transient_num:]) # disregarding the first few timepoints as the model stabilizes

# Saving the last recording of training as a Model_fitting attribute
self.lastRec = {}
for name in set(self.model.state_names + self.model.output_names):
self.lastRec[name] = Recording(windListDict[name], step_size = self.model.step_size) #TODO: This won't work if different variables have different step sizes
self.lastRec[name] = Recording(windListDict[name], step_size = self.model.step_size) #TODO: This won't work if different variables have different step sizes