diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 00000000..925aba72
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,6 @@
+[codespell]
+# Ref: https://github.com/codespell-project/codespell#using-a-config-file
+skip = .git*,.codespellrc
+check-hidden = true
+ignore-regex = ((^\s*"image/\S+": "|"output_type":"stream").*|Ather SH|Hussain Ather)
+ignore-words-list = numer
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 00000000..c59e0473
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,25 @@
+# Codespell configuration is within .codespellrc
+---
+name: Codespell
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+permissions:
+  contents: read
+
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Annotate locations with typos
+        uses: codespell-project/codespell-problem-matcher@v1
+      - name: Codespell
+        uses: codespell-project/actions-codespell@v2
diff --git a/doc/about_whobpyt/architecture.md b/doc/about_whobpyt/architecture.md
index e7362d09..10dfab07 100644
--- a/doc/about_whobpyt/architecture.md
+++ b/doc/about_whobpyt/architecture.md
@@ -1,7 +1,7 @@
 Code Architecture
 ===================================
 
-The package is a collection of interchangable Whole Brain Models, Objective Functions Components, and Parameter Fitting Paradigms. This code base is still in alpha phase and not all combinations of these components are currently supported.
+The package is a collection of interchangeable Whole Brain Models, Objective Function Components, and Parameter Fitting Paradigms. This code base is still in alpha phase and not all combinations of these components are currently supported.
 
 ## Simplified Usage Pseudo Code
 
@@ -30,7 +30,7 @@ plot(fitting.trainingStats)
 verify_model = NumPyNMM(model.params)
 simulated_data = verify_model.simulate()
 
-# Preform Analysis
+# Perform Analysis
 ...
 ```
@@ -41,7 +41,7 @@ These models implement the numerical simulation of a CNMM (or modified CNMM). Th
 
 The built in models are:
 
-- RWWExcInb - Two variations are avaliable
+- RWWExcInb - Two variations are available
 - JansenRit - With Lead Field, Delays, Laplacian Connections
 - Linear (needs updating)
 - Robinson (Future Addition)
@@ -64,7 +64,7 @@
 
 Paradigms for fitting model parameters.
 The built in parameter fitting paradigms are:
 
-- Model Fitting - Uses a approch to train on windowed sections of neuroimaging recordings
+- Model Fitting - Uses an approach to train on windowed sections of neuroimaging recordings
 - Fitting FNGFPG - A technique to run true time scale BOLD
diff --git a/doc/about_whobpyt/background.md b/doc/about_whobpyt/background.md
index 8f9c7d86..746fe618 100644
--- a/doc/about_whobpyt/background.md
+++ b/doc/about_whobpyt/background.md
@@ -6,7 +6,7 @@ The Whole Brain Models are of the form of Connectome-based Neural Mass Models (C
 
 ## Neuroimaging Data
 
-The following items may be requried (in a consistent parcellated format), depending on the CNMM model and empirical data:
+The following items may be required (in a consistent parcellated format), depending on the CNMM model and empirical data:
 
 - Structural Connectivity Matrix : For connection strengths
 - Distance Matrix : For connection delays
diff --git a/doc/about_whobpyt/overview.rst b/doc/about_whobpyt/overview.rst
index 4b841030..fc936653 100644
--- a/doc/about_whobpyt/overview.rst
+++ b/doc/about_whobpyt/overview.rst
@@ -3,9 +3,9 @@ Overview
 
 Whole Brain Modelling in PyTorch (WhoBPyT) is a Python package for fitting parameters of Whole Brain Models (WBM) to neuroimaging data. In particular, differential equation based WBMs such as Connectome-based Neural Mass Models (CNMM) can be implemented in PyTorch, and by doing so the simulated neuroimaging data can be backpropagated through time to update model parameters. This is the deep learning approach that WhoBPyT uses.
 
-In order to use this package, a brain model, objective function, and parameter fitting paradigm must be chosen. The appropriate choices will depend on the research question and the neuroimaging data avaliable. Data must be processed ahead of time into a consistent parcellated format.
+In order to use this package, a brain model, objective function, and parameter fitting paradigm must be chosen. The appropriate choices will depend on the research question and the neuroimaging data available. Data must be processed ahead of time into a consistent parcellated format.
 
-After fitting, the result will be one or more sets of paramters. It's important to verify these parameters in another model implementation, as the models implemented in PyTorch may have default or optional moditications that deviate from the original model's dynamics.
+After fitting, the result will be one or more sets of parameters. It's important to verify these parameters in another model implementation, as the models implemented in PyTorch may have default or optional modifications that deviate from the original model's dynamics.
 
 If you use this package, please consider citing the following papers:
diff --git a/doc/conf.py b/doc/conf.py
index d26df79b..4d12c124 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -8,7 +8,7 @@
 # build, prefer to use the version of the theme in this repo, not the installed
 # version of the theme.
 def is_development_build():
-    # PR builds have an interger version
+    # PR builds have an integer version
     re_version = re.compile(r'^[\d]+$')
     if 'READTHEDOCS' in os.environ:
         version = os.environ.get('READTHEDOCS_VERSION', '')
diff --git a/examples/LinearRNN_Example.ipynb b/examples/LinearRNN_Example.ipynb
index 49be8a37..6cd0f078 100644
--- a/examples/LinearRNN_Example.ipynb
+++ b/examples/LinearRNN_Example.ipynb
@@ -41,7 +41,7 @@
     "# When calculating the correlation error we only use the upper or lower triangle (Functional Connectivity is symmetric)\n",
     "\n",
     "# The generation of this synthetic FC is not important.\n",
-    "# You can replace this syntetic FC with an empirical FC matrix if you like!\n",
+    "# You can replace this synthetic FC with an empirical FC matrix if you like!\n",
     "num_regions = 40\n",
     "temp = (2*torch.rand(num_regions,num_regions)-1)\n",
     "temp[range(num_regions), range(num_regions)] = 1\n",
@@ -228,7 +228,7 @@
     "    #noise = torch.randn(input_length, 1, num_regions) # Create input noise to RNN\n",
     "    output, hn = rnn(init_state, sim_len) # Run the RNN\n",
     "    simFC = torch.corrcoef(torch.transpose(torch.squeeze(output, 1), 0, 1)) # Calculate the Functional Connectivity of the RNN output\n",
-    "    simFCvec = simFC[triIdx[0], triIdx[1]] #Extract upper traingle of RNN Functional Connectivity\n",
+    "    simFCvec = simFC[triIdx[0], triIdx[1]] #Extract upper triangle of RNN Functional Connectivity\n",
     "    corVal = torch.corrcoef(torch.stack((targetFCvec, simFCvec)))[0,1] # Correlate RNN FC (upper triangle) with Target FC (upper triangle)\n",
     "\n",
     "    # Choose one of the below loss functions (NOTE: Only 1 should be uncommented)\n",
diff --git a/examples/Multimodal_Simulation_Example.ipynb b/examples/Multimodal_Simulation_Example.ipynb
index a11319bc..3d64d104 100644
--- a/examples/Multimodal_Simulation_Example.ipynb
+++ b/examples/Multimodal_Simulation_Example.ipynb
@@ -62,7 +62,7 @@
     "skip_trans = int(500/step_size)\n",
     "\n",
     "# Initial Conditions\n",
-    "S_E = 0.6; S_I = 0.1; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be choosen for different initial S_E\n",
+    "S_E = 0.6; S_I = 0.1; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be chosen for different initial S_E\n",
     "init_state = torch.tensor([[S_E, S_I, x, f, v, q]]).repeat(num_regions, 1)\n",
     "\n",
     "# Add randomness\n",
@@ -298,7 +298,7 @@
    "outputs": [],
    "source": [
     "## Written in such a way as to be able to adjust the relative importance of components that make up the objective function.\n",
-    "## Also, written in such a way as to be able to track and plot indiviual components losses over time. \n",
+    "## Also, written in such a way as to be able to track and plot individual component losses over time. \n",
\n", "\n", "class objectiveFunction():\n", " def __init__(self):\n", diff --git a/examples/Multimodal_Simulation_Example_Comparing_Delayed_Laplacian_2022-05-30.ipynb b/examples/Multimodal_Simulation_Example_Comparing_Delayed_Laplacian_2022-05-30.ipynb index 77596a04..c6f53546 100644 --- a/examples/Multimodal_Simulation_Example_Comparing_Delayed_Laplacian_2022-05-30.ipynb +++ b/examples/Multimodal_Simulation_Example_Comparing_Delayed_Laplacian_2022-05-30.ipynb @@ -62,7 +62,7 @@ "skip_trans = int(2000/step_size)\n", "\n", "# Initial Conditions\n", - "S_E = 0.20; S_I = 0.05; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be choosen for different initial S_E\n", + "S_E = 0.20; S_I = 0.05; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be chosen for different initial S_E\n", "init_state = torch.tensor([[S_E, S_I, x, f, v, q]]).repeat(num_regions, 1)\n", "\n", "# Add randomness\n", diff --git a/examples/eg002r__multimodal_simulation.py b/examples/eg002r__multimodal_simulation.py index 89523b1f..2056ff68 100644 --- a/examples/eg002r__multimodal_simulation.py +++ b/examples/eg002r__multimodal_simulation.py @@ -40,7 +40,7 @@ import seaborn as sns import matplotlib.pyplot as plt -print("Is cuda avaliable?") +print("Is cuda available?") print(torch.cuda.is_available()) device = torch.device("cpu") #Options: "cpu" or "cuda" @@ -60,7 +60,7 @@ skip_trans = int(500/step_size) # Initial Conditions -S_E = 0.6; S_I = 0.1; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be choosen for different initial S_E +S_E = 0.6; S_I = 0.1; x = 0.0000; f = 2.4286; v = 1.3283; q = 0.6144 # x,f,v,q might be chosen for different initial S_E init_state = torch.tensor([[S_E, S_I, x, f, v, q]]).repeat(num_regions, 1) # Add randomness @@ -113,7 +113,7 @@ # --------------------------------------------------- # # Written in such as way as to be able to adjust the relative importance of components that make up the objective function. -# Also, written in such a way as to be able to track and plot indiviual components losses over time. +# Also, written in such a way as to be able to track and plot individual components losses over time. class mmObjectiveFunction(): def __init__(self): diff --git a/examples/eg005r__gpu_support.py b/examples/eg005r__gpu_support.py index e173cac2..306b0c94 100644 --- a/examples/eg005r__gpu_support.py +++ b/examples/eg005r__gpu_support.py @@ -4,7 +4,7 @@ Evaluating CPU vs. GPU Performance ================================= -GPU Support has been added to mutiple classes in WhoBPyT. This code is for evaluating the difference in speed between CPU and GPU. The relative performance will depend on the hardware being used. +GPU Support has been added to multiple classes in WhoBPyT. This code is for evaluating the difference in speed between CPU and GPU. The relative performance will depend on the hardware being used. This code is set to run on CPU by default, and then GPU can be tested by updating the device (See Importage Section). 
@@ -38,7 +38,7 @@
 import seaborn as sns
 import matplotlib.pyplot as plt
 
-print("Is cuda avaliable?")
+print("Is cuda available?")
 print(torch.cuda.is_available())
 device = torch.device("cpu") #Options: "cpu" or "cuda"
 
diff --git a/whobpyt/data/generators.py b/whobpyt/data/generators.py
index 2a17bad1..3d74b3f7 100644
--- a/whobpyt/data/generators.py
+++ b/whobpyt/data/generators.py
@@ -106,7 +106,7 @@ def syntheticSC(numRegions, seed = None, maxConDist = 50):
 
     Args:
         numRegions (int): The number of regions in the connectome (must be an even number).
-        seed (int, optional): value to use as np.random.seed() for reproducability.. Defaults to None.
+        seed (int, optional): value to use as np.random.seed() for reproducibility. Defaults to None.
        maxConDist (int, optional): The max distance between regions such that less than this distance there can still be a connection strength. May wish to scale this based on `numRegions`. Defaults to 50.
 
     Raises:
@@ -147,7 +147,7 @@
             continue
         dist = np.linalg.norm(np.array(loc[x]) - np.array(loc[y])) #Distance between two regions
         if dist < maxConDist:
-            # If the distance between two regions is less than maxConDist, then connection strenth is calculated as follows
+            # If the distance between two regions is less than maxConDist, then connection strength is calculated as follows
             con[x,y] = (maxConDist-dist)/maxConDist
 
     return con, loc
diff --git a/whobpyt/datatypes/AbstractMode.py b/whobpyt/datatypes/AbstractMode.py
index 334e5085..13d09175 100644
--- a/whobpyt/datatypes/AbstractMode.py
+++ b/whobpyt/datatypes/AbstractMode.py
@@ -1,7 +1,7 @@
 import torch
 
 class AbstractMode:
-    # Neuroimaging Modalities such as EEG and fMRI BOLD may be seperate from the NMM model
+    # Neuroimaging Modalities such as EEG and fMRI BOLD may be separate from the NMM model
     # and implemented by inheriting from this class.
     # Going forward, the recommendation is to have the modalities integrated with the model.
 
diff --git a/whobpyt/datatypes/AbstractParams.py b/whobpyt/datatypes/AbstractParams.py
index 05b127ba..55d10320 100644
--- a/whobpyt/datatypes/AbstractParams.py
+++ b/whobpyt/datatypes/AbstractParams.py
@@ -3,14 +3,14 @@
 class AbstractParams:
     # This class stores the parameters used by a model. The parameters may be for the Neural Mass Model and/or Neuroimaging Modality.
-    # It should be useable by both the pytorch model for training and a numpy model for parameter verification.
+    # It should be usable by both the pytorch model for training and a numpy model for parameter verification.
     def __init__(self, **kwargs):
         # Define the parameters using the par data structure
         pass
 
     def getFittedNames(self):
-        # Returns a named list of paramters that are being fitted
+        # Returns a named list of parameters that are being fitted
         # Assumes the par datastructure is being used for parameters
 
         fp = []
diff --git a/whobpyt/datatypes/outputs.py b/whobpyt/datatypes/outputs.py
index e8218cb0..dfdad614 100644
--- a/whobpyt/datatypes/outputs.py
+++ b/whobpyt/datatypes/outputs.py
@@ -84,7 +84,7 @@ def reset(self):
 
     def appendLoss(self, newValue):
         """
-        Append Trainig Loss
+        Append Training Loss
 
         Parameters
         -----------
diff --git a/whobpyt/depr/fit.py b/whobpyt/depr/fit.py
index 32c963a8..0c539e67 100644
--- a/whobpyt/depr/fit.py
+++ b/whobpyt/depr/fit.py
@@ -30,7 +30,7 @@ def __init__(self, model, ts, num_epoches, cost):
         ts: array with num_tr x node_size
             empirical EEG time-series
         num_epoches: int
-            the times for repeating trainning
+            the number of times to repeat training
         """
         self.model = model
         self.num_epoches = num_epoches
@@ -84,7 +84,7 @@ def train(self, u=0):
 
         hE = torch.tensor(np.random.uniform(state_lb, state_ub, (self.model.node_size, delays_max)), dtype=torch.float32)
 
-        # define masks for geting lower triangle matrix
+        # define masks for getting lower triangle matrix
         mask = np.tril_indices(self.model.node_size, -1)
         mask_e = np.tril_indices(self.model.output_size, -1)
 
@@ -142,7 +142,7 @@
 
                 next_batch, hE_new = self.model(external, X, hE)
 
-                # Get the batch of emprical EEG signal.
+                # Get the batch of empirical EEG signal.
                 ts_batch = torch.tensor(
                     (eeg.T[i_batch * self.model.batch_size:(i_batch + 1) * self.model.batch_size, :]).T,
                     dtype=torch.float32)
@@ -298,7 +298,7 @@ def test(self, x0, he0, base_batch_num, u=0):
 
         # placeholders for model parameters
 
-        # define mask for geting lower triangle matrix
+        # define mask for getting lower triangle matrix
         mask = np.tril_indices(self.model.node_size, -1)
         mask_e = np.tril_indices(self.model.output_size, -1)
 
@@ -421,4 +421,4 @@
                 tmp_ls = getattr(self.output_sim, name + '_test')
                 setattr(self.output_sim, name + '_test', np.concatenate(tmp_ls, axis=1))
         else:
-            print("only WWD model for the test_realtime funcion")
+            print("only WWD model for the test_realtime function")
diff --git a/whobpyt/depr/models.py b/whobpyt/depr/models.py
index 1432e86f..cbad818e 100644
--- a/whobpyt/depr/models.py
+++ b/whobpyt/depr/models.py
@@ -225,7 +225,7 @@ def forward(self, init_state, sim_len, useDelays = False, useLaplacian = False,
         if(useLaplacian & (not useDelays)):
             # WARNING: This has not been tested
 
-            # NOTE: We are acutally using the NEGATIVE Laplacian
+            # NOTE: We are actually using the NEGATIVE Laplacian
 
             Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1))   #Con_Mtx should be normalized, so this should just add a diagonal of -1's
             S_E_laplacian = torch.matmul(self.Con_Mtx + Laplacian_diagonal, S_E)
 
@@ -236,7 +236,7 @@
         if(useDelays & useLaplacian):
             # WARNING: This has not been tested
 
-            # NOTE: We are acutally using the NEGATIVE Laplacian
+            # NOTE: We are actually using the NEGATIVE Laplacian
 
             Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1))   #Con_Mtx should be normalized, so this should just add a diagonal of -1's
 
@@ -393,7 +393,7 @@ def __init__(self):
     #Starting Condition
     #x = 1   # vasodilatory signal
     #f = 1   # inflow
-    #v = 1   # blood volumne
+    #v = 1   # blood volume
     #q = 1   # deoxyhemoglobin content
 
class BOLD_Layer(torch.nn.Module):
@@ -429,7 +429,7 @@ def __init__(self, num_regions, params, useBC = False):
     #Starting Condition
     #x = 1   # vasodilatory signal
     #f = 1   # inflow
-    #v = 1   # blood volumne
+    #v = 1   # blood volume
     #q = 1   # deoxyhemoglobin content
 
     #############################################
@@ -950,7 +950,7 @@ def __init__(self, model_name, **kwargs):
 class RNNJANSEN(torch.nn.Module):
     """
     A module for forward model (JansenRit) to simulate a batch of EEG signals
-    Attibutes
+    Attributes
     ---------
     state_size : int
         the number of states in the JansenRit model
@@ -1136,7 +1136,7 @@ def forward(self, input, hx, hE):
             self.delays = (self.dist / (conduct_lb * con_1 + m(self.mu))).type(torch.int64)
             # print(torch.max(self.delays), self.delays.shape)
 
-        # placeholder for the updated corrent state
+        # placeholder for the updated current state
         current_state = torch.zeros_like(hx)
 
         # placeholders for output BOLD, history of E I x f v and q
@@ -1331,7 +1331,7 @@ def h_tf(a, b, d, z):
 class RNNWWD(torch.nn.Module):
     """
     A module for forward model (WWD) to simulate a batch of BOLD signals
-    Attibutes
+    Attributes
     ---------
     state_size : int
         the number of states in the WWD model
@@ -1516,7 +1516,7 @@ def forward(self, external, hx, hE):
 
         l_s = torch.tensor(np.zeros((1, 1)), dtype=torch.float32)
 
-        # placeholder for the updated corrent state
+        # placeholder for the updated current state
         current_state = torch.zeros_like(hx)
 
         # placeholders for output BOLD, history of E I x f v and q
@@ -1638,7 +1638,7 @@ class WWD_np( ):
     """
     A module for forward model (WWD) to simulate a batch of BOLD signals
-    Attibutes
+    Attributes
     ---------
     state_size : int
         the number of states in the WWD model
diff --git a/whobpyt/depr/objective.py b/whobpyt/depr/objective.py
index a523b2f2..d2a6c136 100644
--- a/whobpyt/depr/objective.py
+++ b/whobpyt/depr/objective.py
@@ -121,7 +121,7 @@ def calcLoss(self, simData):
 class functionalConnectivityLoss():
     # Right now this fits to a fixed "empirical" FC matrix, but in the future
     # will change to fit to a time series of FC
-    # Furthermore, for computational effeciency a batch of overlapping FC's will
+    # Furthermore, for computational efficiency a batch of overlapping FC's will
     # be calculated to create a kind of mini-batch back propagation
 
     def __init__(self, num_regions, varIdx, targetValue = None, empiricalData = None):
diff --git a/whobpyt/functions/arg_type_check.py b/whobpyt/functions/arg_type_check.py
index 7f034158..4ecf2ffd 100644
--- a/whobpyt/functions/arg_type_check.py
+++ b/whobpyt/functions/arg_type_check.py
@@ -5,7 +5,7 @@ def method_arg_type_check(method_obj, exclude = []):
     """
     Takes the method object of a given function (e.g. RNNJANSEN) and checks that the passed arguments abide by their
-    expected data types. If there is a discrepency, raises a ValueError.
+    expected data types. If there is a discrepancy, raises a ValueError.
 
     Optional argument: exclude
         List of strings containing argument names to exclude from the check (e.g. ['step_size', 'params']).
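The `method_arg_type_check` docstring corrected just above describes checking that passed arguments abide by their annotated types. A rough sketch of that general idea follows; this is not WhoBPyT's actual implementation, and the names `check_arg_types` and `make_model` are hypothetical:

```python
def check_arg_types(func, bound_args: dict, exclude: tuple = ()) -> None:
    """Raise ValueError when a bound argument does not match its annotation."""
    for name, expected in getattr(func, "__annotations__", {}).items():
        if name in exclude or name == "return" or name not in bound_args:
            continue
        value = bound_args[name]
        # Only plain classes are checked here; typing generics would need more care.
        if isinstance(expected, type) and not isinstance(value, expected):
            raise ValueError(f"argument '{name}' should be {expected.__name__}, "
                             f"got {type(value).__name__}")

def make_model(node_size: int, step_size: float):
    pass

check_arg_types(make_model, {"node_size": 10, "step_size": 0.05})    # passes
check_arg_types(make_model, {"node_size": 10, "step_size": "oops"})  # raises ValueError
```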
diff --git a/whobpyt/models/BOLD/BOLD.py b/whobpyt/models/BOLD/BOLD.py
index 4ec143da..c693e658 100644
--- a/whobpyt/models/BOLD/BOLD.py
+++ b/whobpyt/models/BOLD/BOLD.py
@@ -46,7 +46,7 @@ def createIC(self, ver):
         #Starting Condition
         #x = 1   # vasodilatory signal
         #f = 1   # inflow
-        #v = 1   # blood volumne
+        #v = 1   # blood volume
         #q = 1   # deoxyhemoglobin content
 
         pass
diff --git a/whobpyt/models/JansenRit/jansen_rit.py b/whobpyt/models/JansenRit/jansen_rit.py
index 65975c96..fceeb963 100644
--- a/whobpyt/models/JansenRit/jansen_rit.py
+++ b/whobpyt/models/JansenRit/jansen_rit.py
@@ -23,7 +23,7 @@ class RNNJANSEN(AbstractNMM):
     """
     A module for forward model (JansenRit) to simulate EEG signals
-    Attibutes
+    Attributes
     ---------
     state_size : int
         Number of states in the JansenRit model
diff --git a/whobpyt/models/Linear/linear.py b/whobpyt/models/Linear/linear.py
index 4a11ab22..7686dfdc 100644
--- a/whobpyt/models/Linear/linear.py
+++ b/whobpyt/models/Linear/linear.py
@@ -25,7 +25,7 @@ def __init__(self, model_name, **kwargs):
 class RNNLIN(AbstractNMM):
     """
     A module for forward model (Linear Model with 1 population) to simulate a window of BOLD signals
-    Attibutes
+    Attributes
     ---------
     state_size : int
         the number of states in the WWD model
diff --git a/whobpyt/models/RWW/README.md b/whobpyt/models/RWW/README.md
index ee74d817..b18e5ef1 100644
--- a/whobpyt/models/RWW/README.md
+++ b/whobpyt/models/RWW/README.md
@@ -11,9 +11,9 @@
 * Fitting RWW Weights
 * Fitting Structural Connectivity Weights
 
-* Negative Laplacian of the Structual Connectivity Matrix used
+* Negative Laplacian of the Structural Connectivity Matrix used
 * Boundary Functions on State Variables
-* ReLU functions applied to parameters to prevent certian parameters from changing sign in the equations
+* ReLU functions applied to parameters to prevent certain parameters from changing sign in the equations
 * A kind of downsampling between the RWW and the BOLD dynamics
 * Faster BOLD dynamics to reduce computer memory requirement
 * Custom objective function components involving hyperparameters
diff --git a/whobpyt/models/RWW/RWW_np.py b/whobpyt/models/RWW/RWW_np.py
index ba8dd326..82c26a8e 100644
--- a/whobpyt/models/RWW/RWW_np.py
+++ b/whobpyt/models/RWW/RWW_np.py
@@ -25,7 +25,7 @@ class RWW_np:
     """
     A module for forward model (WWD) to simulate a batch of BOLD signals
-    Attibutes
+    Attributes
     ---------
     state_size: int
         the number of states in the WWD model
diff --git a/whobpyt/models/RWW/wong_wang.py b/whobpyt/models/RWW/wong_wang.py
index bfe3ab07..da962d68 100644
--- a/whobpyt/models/RWW/wong_wang.py
+++ b/whobpyt/models/RWW/wong_wang.py
@@ -34,7 +34,7 @@ class RNNRWW(AbstractNMM):
     state_size : int
         The number of states in the WWD model
     tr : float
-        tr of fMRI image. That is, the spacing betweeen images in the time series.
+        tr of fMRI image. That is, the spacing between images in the time series.
     step_size: float
         Integration step for forward model
     steps_per_TR: int
@@ -58,11 +58,11 @@
     use_Gaussian_EI: bool
         Use a custom objective function component
     use_dynamic_boundary: bool
-        Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size.
+        Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certain step_size.
     params: ParamsRWW
         An object that contains the parameters for the RWW nodes
     params_fitted: dictionary
-        A dictionary containg fitted parameters and fitted hyper_parameters
+        A dictionary containing fitted parameters and fitted hyper_parameters
     output_size: int
         Number of ROIs
 
@@ -112,7 +112,7 @@ def __init__(self, node_size: int,
     sampling_size:
         This is related to an averaging of NMM values before inputting into hemodynamic equations. This is non-standard.
     tr : float
-        tr of fMRI image. That is, the spacing betweeen images in the time series.
+        tr of fMRI image. That is, the spacing between images in the time series.
     sc: float node_size x node_size array
         The structural connectivity matrix
     use_fit_gains: bool
@@ -126,7 +126,7 @@
     use_Laplacian: bool
         Whether to use the negative laplacian of the (fitted) structural connectivity as the structural connectivity
     use_dynamic_boundary: bool
-        Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certian step_size.
+        Whether to have tanh function applied at each time step to constrain parameter values. Simulation results will become dependent on a certain step_size.
     """
     method_arg_type_check(self.__init__) # Check that the passed arguments (excluding self) abide by their expected data types
diff --git a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
index 3b027d56..26469e95 100644
--- a/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
+++ b/whobpyt/models/RWWEI2/Multimodal_RWWEI2.py
@@ -98,7 +98,7 @@ def blockTS(data, blocks, numNodes, numSV):
     n = torch.numel(data)
 
     if (not (n%blocks == 0)):
-        print("ERROR: data is not divisable by blocks")
+        print("ERROR: data is not divisible by blocks")
         return
 
     newTimeDim = int(n/(blocks*numNodes*numSV))
diff --git a/whobpyt/models/RWWEI2/RWWEI2.py b/whobpyt/models/RWWEI2/RWWEI2.py
index de2fc22d..548cd139 100644
--- a/whobpyt/models/RWWEI2/RWWEI2.py
+++ b/whobpyt/models/RWWEI2/RWWEI2.py
@@ -31,7 +31,7 @@ class RWWEI2(AbstractNMM):
     output_size : Int
         The number of brain regions used in the model
     state_names : List of Strings
-        A list of the state varaible names of the model
+        A list of the state variable names of the model
     output_names : List of Strings
         A list of the output variable names of the model
     track_params :
@@ -125,7 +125,7 @@ def setBlocks(self, num_blocks):
 
     def genNoise(self, block_len, batched = False):
         '''
-        This generates noise to be used by the model. It is particulary useful for the FNGFPG design
+        This generates noise to be used by the model. It is particularly useful for the FNGFPG design
         where the same noise must be used but restructured for two different forward passes.
 
         Parameters
@@ -288,7 +288,7 @@ def H_for_I_V3(I_I, update = False):
         if(useLaplacian & (not useDelays)):
             # WARNING: This has not been tested
 
-            # NOTE: We are acutally using the NEGATIVE Laplacian
+            # NOTE: We are actually using the NEGATIVE Laplacian
 
             Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1))   #Con_Mtx should be normalized, so this should just add a diagonal of -1's
             S_E_laplacian = torch.matmul(self.Con_Mtx + Laplacian_diagonal, S_E)
 
@@ -298,7 +298,7 @@ def H_for_I_V3(I_I, update = False):
         if(useDelays & useLaplacian):
             # WARNING: This has not been tested
 
-            # NOTE: We are acutally using the NEGATIVE Laplacian
+            # NOTE: We are actually using the NEGATIVE Laplacian
 
             Laplacian_diagonal = -torch.diag(torch.sum(self.Con_Mtx, axis=1))   #Con_Mtx should be normalized, so this should just add a diagonal of -1's
 
@@ -384,7 +384,7 @@ def blockTS(data, blocks, numNodes, numSV):
     n = torch.numel(data)
 
     if (not (n%blocks == 0)):
-        print("ERROR: data is not divisable by blocks")
+        print("ERROR: data is not divisible by blocks")
         return
 
     newTimeDim = int(n/(blocks*numNodes*numSV))
diff --git a/whobpyt/models/RWWEI2/RWWEI2_validate.py b/whobpyt/models/RWWEI2/RWWEI2_validate.py
index 091c6b7b..45b52ca8 100644
--- a/whobpyt/models/RWWEI2/RWWEI2_validate.py
+++ b/whobpyt/models/RWWEI2/RWWEI2_validate.py
@@ -2,7 +2,7 @@
 # Multi-Modal Connectome-based Neural Mass Modelling
 #
-# This is to simulate a RWW Network with addtional BOLD and EEG layers.
+# This is to simulate a RWW Network with additional BOLD and EEG layers.
 
 ## EQUATIONS & BIOLOGICAL VARIABLES FROM:
 #
diff --git a/whobpyt/optimization/custom_cost_mmRWW2.py b/whobpyt/optimization/custom_cost_mmRWW2.py
index 84d51f1a..75d7c069 100644
--- a/whobpyt/optimization/custom_cost_mmRWW2.py
+++ b/whobpyt/optimization/custom_cost_mmRWW2.py
@@ -10,7 +10,7 @@ def __init__(self, num_regions, simKey, targetValue, device = torch.device('cpu'
         # Defining the Objective Function
         # ---------------------------------------------------
         # Written in such a way as to be able to adjust the relative importance of components that make up the objective function.
-        # Also, written in such a way as to be able to track and plot indiviual components losses over time.
+        # Also, written in such a way as to be able to track and plot individual component losses over time.
 
         # Weights of Objective Function Components
         self.S_E_mean_weight = 1
diff --git a/whobpyt/run/batchfitting.py b/whobpyt/run/batchfitting.py
index 35fea022..3658b00a 100644
--- a/whobpyt/run/batchfitting.py
+++ b/whobpyt/run/batchfitting.py
@@ -89,7 +89,7 @@ def train(self, stim, empDatas, num_epochs, batch_size, learningrate = 0.05, sta
         delayHist = dummyVal # TODO: Delays are currently not implemented in various places
 
         # initialize the external inputs
-        external = dummyVal # TODO: Currenlty this code only works for resting state
+        external = dummyVal # TODO: Currently this code only works for resting state
 
         num_blocks = batch_size
 
diff --git a/whobpyt/run/customfitting.py b/whobpyt/run/customfitting.py
index 77e8164b..e67df0f8 100644
--- a/whobpyt/run/customfitting.py
+++ b/whobpyt/run/customfitting.py
@@ -58,7 +58,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
         num_epochs : Int
             Number of epochs for training.
         block_len : Int
-            The number of simulation steps per block. Total number of steps should be divisable by this number.
+            The number of simulation steps per block. Total number of steps should be divisible by this number.
         learningrate : Float
             Learning rate used by backpropagation optimizer.
         resetIC : Bool
@@ -95,7 +95,7 @@ def train(self, stim, empDatas, num_epochs, block_len, learningrate = 0.05, rese
         delayHist = torch.tensor(1.0, device = self.device) # TODO: Delays are currently not implemented in various places
 
         # initialize the external inputs
-        external = torch.tensor([0], device = self.device) # TODO: Currenlty this code only works for resting state
+        external = torch.tensor([0], device = self.device) # TODO: Currently this code only works for resting state
 
         num_blocks = int(self.model.sim_len/block_len)
 
diff --git a/whobpyt/run/modelfitting.py b/whobpyt/run/modelfitting.py
index 3f774359..4bf5e7b3 100644
--- a/whobpyt/run/modelfitting.py
+++ b/whobpyt/run/modelfitting.py
@@ -59,7 +59,7 @@ def __init__(self, model: AbstractNMM, cost: AbstractLoss, device = torch.device
         self.device = device
 
         self.trainingStats = TrainingStats(self.model)
-        self.lastRec = None #A dictionary or Recordings of the last simulation preformed (either training or evaluation)
+        self.lastRec = None #A dictionary of Recordings of the last simulation performed (either training or evaluation)
 
         #self.u = None #This is the ML "Training Input"
         #self.empTS = ts #This is the ML "Training Labels" - A list
@@ -212,7 +212,7 @@ def train(self, u, empRecs: list,
 
             print('epoch: ', i_epoch,
                   'loss:', loss.detach().cpu().numpy(),
-                  'Pseudo FC_cor: ', np.corrcoef(fc_sim[mask_e], fc[mask_e])[0, 1], #Calling this Pseudo as different windows of the time series have slighly different parameter values
+                  'Pseudo FC_cor: ', np.corrcoef(fc_sim[mask_e], fc[mask_e])[0, 1], #Calling this Pseudo as different windows of the time series have slightly different parameter values
                   'cos_sim: ', np.diag(cosine_similarity(ts_sim, ts_emp)).mean())
 
             if lr_scheduler:
@@ -224,7 +224,7 @@
             self.trainingStats.appendLoss(np.mean(loss_his))
             # NMM/Other Parameter info for the Epoch (a list where a number is recorded every window of every record)
             trackedParam = {}
-            exclude_param = ['gains_con', 'lm'] #This stores SC and LF which are saved seperately
+            exclude_param = ['gains_con', 'lm'] #This stores SC and LF which are saved separately
             if(self.model.track_params):
                 for par_name in self.model.track_params:
                     var = getattr(self.model.params, par_name)
@@ -334,7 +334,7 @@ def simulate(self, u, numTP: int, base_window_num: int = 0, transient_num: int =
         u : int or Tensor
             external or stimulus
         numTP : int
-            The number of time points ot simulate
+            The number of time points to simulate
         base_window_num : int
             length of num_windows for resting
         transient_num : int
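The "NEGATIVE Laplacian" comments corrected above (in `whobpyt/depr/models.py` and `whobpyt/models/RWWEI2/RWWEI2.py`) describe a small trick: when `Con_Mtx` is row-normalized, subtracting the diagonal of its row sums is the same as adding a diagonal of -1's, which gives the negative graph Laplacian A - D. A toy sketch of that computation, with `num_regions`, the random `Con_Mtx`, and `S_E` invented here purely for illustration:

```python
import torch

num_regions = 4
Con_Mtx = torch.rand(num_regions, num_regions)
Con_Mtx = Con_Mtx / Con_Mtx.sum(dim=1, keepdim=True)  # row-normalize, as the comments assume

# Row sums are now all 1, so this diagonal holds only -1's,
# and Con_Mtx + Laplacian_diagonal equals the negative Laplacian A - D.
Laplacian_diagonal = -torch.diag(torch.sum(Con_Mtx, axis=1))

S_E = torch.rand(num_regions)
S_E_laplacian = torch.matmul(Con_Mtx + Laplacian_diagonal, S_E)  # diffusive coupling term
```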
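Finally, the `.codespellrc` and workflow added at the top of this diff run the spell check in CI on every push and pull request to `main`. Contributors can run the same check before pushing: codespell reads `.codespellrc` from the working directory on its own (see the Ref link in the config). A minimal sketch, assuming codespell is installed (e.g. `pip install codespell`); this helper script is hypothetical and not part of the PR:

```python
# check_spelling.py -- hypothetical local runner, not part of this PR.
# codespell picks up the [codespell] settings in .codespellrc automatically,
# so invoking it from the repo root mirrors the CI job.
import subprocess
import sys

def run_codespell() -> int:
    result = subprocess.run(["codespell", "."], capture_output=True, text=True)
    if result.stdout:
        print(result.stdout, end="")  # one "path:line: typo ==> fix" line per hit
    return result.returncode  # non-zero when typos are found, as in CI

if __name__ == "__main__":
    sys.exit(run_codespell())
```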