From c1ca438600d7107bce8726997e803325a7a8ec93 Mon Sep 17 00:00:00 2001
From: Parsa Oveisi
Date: Wed, 13 Mar 2024 17:06:07 -0400
Subject: [PATCH] added comments and docstrings

---
 whobpyt/run/batchfitting.py  | 12 +++++++++++-
 whobpyt/run/customfitting.py | 12 +++++++++++-
 whobpyt/run/modelfitting.py  | 14 ++++++++------
 3 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/whobpyt/run/batchfitting.py b/whobpyt/run/batchfitting.py
index 35fea022..3ad964f7 100644
--- a/whobpyt/run/batchfitting.py
+++ b/whobpyt/run/batchfitting.py
@@ -29,6 +29,16 @@ class Fitting_Batch(AbstractFitting):
     """
 
     def __init__(self, model, cost, device = torch.device('cpu')):
+        """
+        Parameters
+        ----------
+        model: AbstractNMM
+            Whole Brain Model to Simulate
+        cost: AbstractLoss
+            A particular objective function which the model will be optimized for.
+        device : torch.device
+            Whether the fitting is to run on CPU or GPU
+        """
 
         self.model = model
         self.cost = cost
@@ -142,4 +152,4 @@ def simulate(self, stim, numTP):
         Not implemented yet.
         '''
         pass
-        
\ No newline at end of file
+        
diff --git a/whobpyt/run/customfitting.py b/whobpyt/run/customfitting.py
index f41cfab8..89565f73 100644
--- a/whobpyt/run/customfitting.py
+++ b/whobpyt/run/customfitting.py
@@ -35,6 +35,16 @@ class Fitting_FNGFPG(AbstractFitting):
     """
 
     def __init__(self, model, cost, device = torch.device('cpu')):
+        """
+        Parameters
+        ----------
+        model: AbstractNMM
+            Whole Brain Model to Simulate
+        cost: AbstractLoss
+            A particular objective function which the model will be optimized for.
+        device : torch.device
+            Whether the fitting is to run on CPU or GPU
+        """
 
         self.model = model
         self.cost = cost
@@ -173,4 +183,4 @@ def simulate(self, stim, numTP):
         '''
         Not implemented yet.
         '''
-        pass
\ No newline at end of file
+        pass
diff --git a/whobpyt/run/modelfitting.py b/whobpyt/run/modelfitting.py
index 074686d2..0aa224e4 100644
--- a/whobpyt/run/modelfitting.py
+++ b/whobpyt/run/modelfitting.py
@@ -1,5 +1,5 @@
 """
-Authors: Zheng Wang, John Griffiths, Andrew Clappison, Hussain Ather, Kevin Kadak
+Authors: Zheng Wang, John Griffiths, Andrew Clappison, Hussain Ather, Kevin Kadak, Parsa Oveisi
 
 Neural Mass Model fitting module for model fitting using pytorch
 """
@@ -86,7 +86,9 @@ def train(self, u, empRecs: list,
         num_epochs: int
             the number of times to go through the entire training data set
         TPperWindow: int
-            Number of Empirical Time Points per window. model.forward does one window at a time.
+            Number of Empirical Time Points per window. model.forward does one window at a time.
+        warmupWindow: int
+            Number of Time Points to use as warmup (not used for training).
         learningrate: float
             rate of gradient descent
         lr_2ndLevel: float
@@ -217,14 +219,14 @@ def train(self, u, empRecs: list,
                 hE = hE_new.detach().clone() #dtype=torch.float32
 
             ts_emp = np.concatenate(list(windowedTS),1) #TODO: Check this code
-            fc = np.corrcoef(ts_emp)
+            fc = np.corrcoef(ts_emp) # calculating the Pearson correlation as a measure of functional connectivity
 
             # TIME SERIES: Concatenate all windows together to get one recording
             for name in set(self.model.state_names + self.model.output_names):
                 windListDict[name] = np.concatenate(windListDict[name], axis=1)
             ts_sim = windListDict[self.model.output_names[0]]
-            fc_sim = np.corrcoef(ts_sim[:, 10:])
+            fc_sim = np.corrcoef(ts_sim[:, 10:]) # disregarding the first few timepoints as the model stabilizes
 
             print('epoch: ', i_epoch,
                   'loss:', loss_main.detach().cpu().numpy(),
@@ -407,9 +409,9 @@ def simulate(self, u, numTP: int, base_window_num: int = 0, transient_num: int =
             windListDict[name] = np.concatenate(windListDict[name], axis=1)
         ts_sim = windListDict[self.model.output_names[0]]
-        fc_sim = np.corrcoef(ts_sim[:, transient_num:])
+        fc_sim = np.corrcoef(ts_sim[:, transient_num:]) # disregarding the first few timepoints as the model stabilizes
 
         # Saving the last recording of training as a Model_fitting attribute
         self.lastRec = {}
         for name in set(self.model.state_names + self.model.output_names):
-            self.lastRec[name] = Recording(windListDict[name], step_size = self.model.step_size) #TODO: This won't work if different variables have different step sizes
\ No newline at end of file
+            self.lastRec[name] = Recording(windListDict[name], step_size = self.model.step_size) #TODO: This won't work if different variables have different step sizes
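
The new constructor docstrings document the (model, cost, device) signature shared by Fitting_Batch and Fitting_FNGFPG. A minimal sketch of how that signature is used follows; the import path is inferred from the file location whobpyt/run/batchfitting.py, and my_model / my_cost are hypothetical placeholders for concrete AbstractNMM and AbstractLoss subclasses, which are not part of this patch.

    import torch
    from whobpyt.run.batchfitting import Fitting_Batch  # path inferred from whobpyt/run/batchfitting.py

    # Hypothetical placeholders: in practice these would be instances of AbstractNMM
    # and AbstractLoss subclasses providing the model dynamics and the objective function.
    my_model = ...  # whole-brain model to simulate
    my_cost = ...   # objective function to optimize for

    # The fitting object receives the model, the cost, and the target device (CPU by default).
    fitting = Fitting_Batch(my_model, my_cost, device=torch.device('cpu'))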
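
The inline comments added in modelfitting.py describe np.corrcoef as computing the Pearson correlation between regional time series (functional connectivity), with an initial transient discarded before correlating. A small self-contained sketch on synthetic data (not whobpyt code) illustrates the shapes involved:

    import numpy as np

    # Synthetic stand-in for a simulated recording: rows are regions, columns are time points.
    n_regions, n_timepoints, transient = 4, 200, 10
    ts_sim = np.random.randn(n_regions, n_timepoints)

    # np.corrcoef treats each row as a variable, so dropping the first `transient`
    # columns and correlating yields an (n_regions x n_regions) Pearson FC matrix,
    # mirroring fc_sim = np.corrcoef(ts_sim[:, 10:]) in the patch.
    fc_sim = np.corrcoef(ts_sim[:, transient:])
    print(fc_sim.shape)  # (4, 4)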