From 225ba244cbac7070eff307daf65c7687bf6eecb5 Mon Sep 17 00:00:00 2001 From: Casper da Costa-Luis Date: Thu, 29 Jan 2026 09:29:52 +0000 Subject: [PATCH 1/5] add Callback.interval - fixes #1909 --- .../cil/optimisation/utilities/callbacks.py | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/utilities/callbacks.py b/Wrappers/Python/cil/optimisation/utilities/callbacks.py index 6eb5cff21..a2adaa9a3 100644 --- a/Wrappers/Python/cil/optimisation/utilities/callbacks.py +++ b/Wrappers/Python/cil/optimisation/utilities/callbacks.py @@ -20,9 +20,9 @@ from abc import ABC, abstractmethod from functools import partialmethod +import numpy as np from tqdm.auto import tqdm as tqdm_auto from tqdm.std import tqdm as tqdm_std -import numpy as np tqdm_std.monitor_interval = 0 # disable background monitoring thread @@ -34,14 +34,20 @@ class Callback(ABC): ---------- verbose: int, choice of 0,1,2, default 1 0=quiet, 1=info, 2=debug. + interval: int, default 1 + Number of algorithm iterations between callback calls. ''' - def __init__(self, verbose=1): + def __init__(self, verbose=1, interval=1): self.verbose = verbose + self.interval = interval @abstractmethod def __call__(self, algorithm): pass + def skip_iteration(self, algorithm) -> bool: + return algorithm.iteration % min(self.interval, max(1, algorithm.update_objective_interval)) != 0 and algorithm.iteration != algorithm.max_iteration + class _OldCallback(Callback): '''Converts an old-style :code:`def callback` to a new-style :code:`class Callback`. @@ -68,8 +74,8 @@ class ProgressCallback(Callback): **tqdm_kwargs: Passed to :code:`tqdm_class`. ''' - def __init__(self, verbose=1, tqdm_class=tqdm_auto, **tqdm_kwargs): - super().__init__(verbose=verbose) + def __init__(self, verbose=1, interval=1, tqdm_class=tqdm_auto, **tqdm_kwargs): + super().__init__(verbose=verbose, interval=interval) self.tqdm_class = tqdm_class self.tqdm_kwargs = tqdm_kwargs self._obj_len = 0 # number of objective updates @@ -80,6 +86,7 @@ def __call__(self, algorithm): tqdm_kwargs.setdefault('total', algorithm.max_iteration) tqdm_kwargs.setdefault('disable', not self.verbose) tqdm_kwargs.setdefault('initial', max(0, algorithm.iteration)) + tqdm_kwargs.setdefault('miniters', min(self.interval, max(1, algorithm.update_objective_interval))) self.pbar = self.tqdm_class(**tqdm_kwargs) if (obj_len := len(algorithm.objective)) != self._obj_len: self.pbar.set_postfix(algorithm.objective_to_dict(self.verbose>=2), refresh=False) @@ -130,18 +137,11 @@ class TextProgressCallback(ProgressCallback): Parameters ---------- - miniters: int, default :code:`Algorithm.update_objective_interval` + miniters: int, default :code:`min(self.interval, max(1, Algorithm.update_objective_interval))` Number of algorithm iterations between screen prints. ''' __init__ = partialmethod(ProgressCallback.__init__, tqdm_class=_TqdmText) - def __call__(self, algorithm): - if not hasattr(self, 'pbar'): - self.tqdm_kwargs['miniters'] = min(( - self.tqdm_kwargs.get('miniters', algorithm.update_objective_interval), - algorithm.update_objective_interval)) - return super().__call__(algorithm) - class LogfileCallback(TextProgressCallback): ''':code:`TextProgressCallback` but to a file instead of screen. 
@@ -157,6 +157,7 @@ def __init__(self, log_file, mode='a', **kwargs): self.fd = open(log_file, mode=mode) super().__init__(file=self.fd, **kwargs) + class EarlyStoppingObjectiveValue(Callback): '''Callback that stops iterations if the change in the objective value is less than a provided threshold value. @@ -172,12 +173,12 @@ class EarlyStoppingObjectiveValue(Callback): def __init__(self, threshold=1e-6): self.threshold=threshold - def __call__(self, algorithm): if len(algorithm.loss)>=2: if np.abs(algorithm.loss[-1]-algorithm.loss[-2]) \omega`, where `omega` is set to default as 1e6. @@ -197,9 +198,7 @@ def __init__(self, epsilon=1e-6, omega=1e6): self.epsilon=epsilon self.omega=omega - def __call__(self, algorithm): - if (algorithm.norms <= algorithm.norms0 * self.epsilon): print('The norm of the residual is less than {} times the norm of the initial residual and so the algorithm is terminated'.format(self.epsilon)) raise StopIteration From 779657ef7850b155e5c358bdd34dacedbc79a7fa Mon Sep 17 00:00:00 2001 From: Casper da Costa-Luis Date: Thu, 29 Jan 2026 09:47:56 +0000 Subject: [PATCH 2/5] add CSVCallback --- .../cil/optimisation/utilities/callbacks.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/Wrappers/Python/cil/optimisation/utilities/callbacks.py b/Wrappers/Python/cil/optimisation/utilities/callbacks.py index a2adaa9a3..d9a1e6005 100644 --- a/Wrappers/Python/cil/optimisation/utilities/callbacks.py +++ b/Wrappers/Python/cil/optimisation/utilities/callbacks.py @@ -17,8 +17,10 @@ # - CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt +import csv from abc import ABC, abstractmethod from functools import partialmethod +from pathlib import Path import numpy as np from tqdm.auto import tqdm as tqdm_auto @@ -158,6 +160,23 @@ def __init__(self, log_file, mode='a', **kwargs): super().__init__(file=self.fd, **kwargs) +class CSVCallback(Callback): + """Saves :code:`algo.loss` in :code:`csv_file`""" + def __init__(self, csv_file='objectives.csv', append=False, **kwargs): + super().__init__(**kwargs) + csv_file = Path(csv_file) + csv_file.parent.mkdir(parents=True, exist_ok=True) + if csv_file.exists() and append: + self.csv = csv.writer(csv_file.open('a', buffering=1)) + else: + self.csv = csv.writer(csv_file.open('w', buffering=1)) + self.csv.writerow(("iteration", "objective")) + + def __call__(self, algorithm): + if not self.skip_iteration(algorithm): + self.csv.writerow((algorithm.iteration, algorithm.get_last_loss())) + + class EarlyStoppingObjectiveValue(Callback): '''Callback that stops iterations if the change in the objective value is less than a provided threshold value. 
From 47a56d50bfa9fedd573f6744625cdda66495bb6e Mon Sep 17 00:00:00 2001 From: Casper da Costa-Luis Date: Thu, 29 Jan 2026 09:48:34 +0000 Subject: [PATCH 3/5] add TimingCallback --- .../cil/optimisation/utilities/callbacks.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Wrappers/Python/cil/optimisation/utilities/callbacks.py b/Wrappers/Python/cil/optimisation/utilities/callbacks.py index d9a1e6005..11193f10f 100644 --- a/Wrappers/Python/cil/optimisation/utilities/callbacks.py +++ b/Wrappers/Python/cil/optimisation/utilities/callbacks.py @@ -21,6 +21,8 @@ from abc import ABC, abstractmethod from functools import partialmethod from pathlib import Path +from time import time +from typing import List, Optional import numpy as np from tqdm.auto import tqdm as tqdm_auto @@ -177,6 +179,28 @@ def __call__(self, algorithm): self.csv.writerow((algorithm.iteration, algorithm.get_last_loss())) +class TimingCallback(Callback): + """ + Measures time taken by each iteration in :code:`self.times`, + excluding time taken by other specified (nested) :code:`callbacks`. + """ + def __init__(self, callbacks: Optional[List[Callback]]=None, **kwargs): + super().__init__(**kwargs) + self.times = [] + self.callbacks = callbacks or [] + self.reset() + + def reset(self): + self.offset = time() + + def __call__(self, algorithm): + time_excluding_callbacks = (now := time()) - self.offset + self.times.append(time_excluding_callbacks) + for c in self.callbacks: + c(algorithm) + self.offset += time() - now + + class EarlyStoppingObjectiveValue(Callback): '''Callback that stops iterations if the change in the objective value is less than a provided threshold value. From c202ab0982b6a7a45a6e5f0fd4565afd675d33b8 Mon Sep 17 00:00:00 2001 From: Casper da Costa-Luis Date: Thu, 29 Jan 2026 09:52:25 +0000 Subject: [PATCH 4/5] document new callbacks --- CHANGELOG.md | 3 + docs/source/optimisation.rst | 114 ++++++++++++++++++----------------- 2 files changed, 63 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 81227615c..19ef3a0d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,9 @@ * XX.X.X - New features: - LSQR algorithm added to the CIL algorithm class (#1975) + - Callback improvements: + - Added `Callback.interval` and `Callback.skip_iteration(Algorithm)` convenience methods (#1909) + - Added `TimingCallback` and `CSVCallback` - Bug fixes: - `CentreOfRotationCorrector.image_sharpness` data is now correctly smoothed to reduce aliasing artefacts and improve robustness. (#2202) - `PaganinProcessor` now correctly applies scaling with magnification for cone-beam geometry (#2225) diff --git a/docs/source/optimisation.rst b/docs/source/optimisation.rst index ac747779d..37e660a88 100644 --- a/docs/source/optimisation.rst +++ b/docs/source/optimisation.rst @@ -101,7 +101,7 @@ SIRT ISTA/PGD -------- -The Iterative Soft Thresholding Algorithm (ISTA) is also known as Proximal Gradient Descent (PGD). Note that in CIL, :ref:`PGD` is an alias of `ISTA`. +The Iterative Soft Thresholding Algorithm (ISTA) is also known as Proximal Gradient Descent (PGD). Note that in CIL, :ref:`PGD` is an alias of `ISTA`. .. _ISTA: .. autoclass:: cil.optimisation.algorithms.ISTA @@ -111,7 +111,7 @@ The Iterative Soft Thresholding Algorithm (ISTA) is also known as Proximal Gradi FISTA ----- -The Fast Iterative Soft Thresholding Algorithm (FISTA). +The Fast Iterative Soft Thresholding Algorithm (FISTA). .. _FISTA: .. 
autoclass:: cil.optimisation.algorithms.FISTA @@ -120,7 +120,7 @@ The Fast Iterative Soft Thresholding Algorithm (FISTA). APGD ----- -The Accelerated Proximal Gradient Descent Algorithm (APGD). This is an extension of the PGD/ISTA algorithm allowing you to either use a constant momemtum or a momentum that is updated at each iteration. +The Accelerated Proximal Gradient Descent Algorithm (APGD). This is an extension of the PGD/ISTA algorithm allowing you to either use a constant momemtum or a momentum that is updated at each iteration. .. autoclass:: cil.optimisation.algorithms.APGD :members: @@ -128,10 +128,10 @@ The Accelerated Proximal Gradient Descent Algorithm (APGD). This is an extension Current options for are based on the scalar momentum, with base class: -.. autoclass:: cil.optimisation.algorithms.APGD.ScalarMomentumCoefficient +.. autoclass:: cil.optimisation.algorithms.APGD.ScalarMomentumCoefficient :members: -Implemented examples are: +Implemented examples are: .. autoclass:: cil.optimisation.algorithms.APGD.ConstantMomentum :members: @@ -164,25 +164,25 @@ PD3O Algorithms (Stochastic) ======================== -Consider optimisation problems that take the form of a separable sum: +Consider optimisation problems that take the form of a separable sum: .. math:: \min_{x} f(x)+g(x) = \min_{x} \sum_{i=0}^{n-1} f_{i}(x) + g(x) = \min_{x} (f_{0}(x) + f_{1}(x) + ... + f_{n-1}(x))+g(x) -where :math:`n` is the number of functions. Where there is a large number of :math:`f_i` or their gradients are expensive to calculate, stochastic optimisation methods could prove more efficient. -There is a growing range of Stochastic optimisation algorithms available with potential benefits of faster convergence in number of iterations or in computational cost. -This is an area of continued development for CIL and, depending on the properties of the :math:`f_i` and the regulariser :math:`g`, there is a range of different options for the user. +where :math:`n` is the number of functions. Where there is a large number of :math:`f_i` or their gradients are expensive to calculate, stochastic optimisation methods could prove more efficient. +There is a growing range of Stochastic optimisation algorithms available with potential benefits of faster convergence in number of iterations or in computational cost. +This is an area of continued development for CIL and, depending on the properties of the :math:`f_i` and the regulariser :math:`g`, there is a range of different options for the user. SPDHG ----- -Stochastic Primal Dual Hybrid Gradient (SPDHG) is a stochastic version of PDHG and deals with optimisation problems of the form: - +Stochastic Primal Dual Hybrid Gradient (SPDHG) is a stochastic version of PDHG and deals with optimisation problems of the form: + .. math:: - + \min_{x} f(Kx) + g(x) = \min_{x} \sum f_i(K_i x) + g(x) -where :math:`f_i` and the regulariser :math:`g` need only be proper, convex and lower semi-continuous ( i.e. do not need to be differentiable). +where :math:`f_i` and the regulariser :math:`g` need only be proper, convex and lower semi-continuous ( i.e. do not need to be differentiable). Each iteration considers just one index of the sum, potentially reducing computational cost. For more examples see our [user notebooks]( https://github.com/vais-ral/CIL-Demos/blob/master/Tomography/Simulated/Single%20Channel/PDHG_vs_SPDHG.py). 
@@ -194,23 +194,23 @@ Each iteration considers just one index of the sum, potentially reducing computa Approximate gradient methods ---------------------------------- -Alternatively, consider that, in addition to the functions :math:`f_i` and the regulariser :math:`g` being proper, convex and lower semi-continuous, the :math:`f_i` are differentiable. In this case we consider stochastic methods that replace a gradient calculation in a deterministic algorithm with a, potentially cheaper to calculate, approximate gradient. +Alternatively, consider that, in addition to the functions :math:`f_i` and the regulariser :math:`g` being proper, convex and lower semi-continuous, the :math:`f_i` are differentiable. In this case we consider stochastic methods that replace a gradient calculation in a deterministic algorithm with a, potentially cheaper to calculate, approximate gradient. For example, when :math:`g(x)=0`, the standard Gradient Descent algorithm utilises iterations of the form .. math:: x_{k+1}=x_k-\alpha \nabla f(x_k) =x_k-\alpha \sum_{i=0}^{n-1}\nabla f_i(x_k). -:math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with :math:`n \nabla f_i(x_k)`, for an index :math:`i` which changes each iteration, leads to the well known stochastic gradient descent algorithm. +:math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with :math:`n \nabla f_i(x_k)`, for an index :math:`i` which changes each iteration, leads to the well known stochastic gradient descent algorithm. -Replacing, :math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with :math:`n \nabla f_i(x_k)`, for an index :math:`i` which changes each iteration, leads to the well known stochastic gradient descent algorithm. +Replacing, :math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with :math:`n \nabla f_i(x_k)`, for an index :math:`i` which changes each iteration, leads to the well known stochastic gradient descent algorithm. -In addition, if :math:`g(x)\neq 0` and has a calculable proximal ( need not be differentiable) one can consider ISTA iterations: +In addition, if :math:`g(x)\neq 0` and has a calculable proximal ( need not be differentiable) one can consider ISTA iterations: .. math:: x_{k+1}=prox_{\alpha g}(x_k-\alpha \nabla f(x_k) )=prox_{\alpha g}(x_k-\alpha \sum_{i=0}^{n-1}\nabla f_i(x_k)) -and again replacing :math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with an approximate gradient. +and again replacing :math:`\nabla f(x_k)=\sum_{i=0}^{n-1}\nabla f_i(x_k)` with an approximate gradient. In a similar way, plugging approximate gradient calculations into deterministic algorithms can lead to a range of stochastic algorithms. In the following table, the left hand column has the approximate gradient function subclass, :ref:`Approximate Gradient base class` the header row has one of CIL's deterministic optimisation algorithm and the body of the table has the resulting stochastic algorithm. @@ -228,9 +228,9 @@ In a similar way, plugging approximate gradient calculations into deterministic | LSVRGFunction\| LSVRG | Prox-LSVRG | Acc-Prox-LSVRG | +----------------+-------+------------+----------------+ -\*In development +\*In development -The stochastic gradient functions can be found listed under functions in the documentation. +The stochastic gradient functions can be found listed under functions in the documentation. Stochastic Gradient Descent Example ---------------------------------- @@ -239,52 +239,52 @@ The below is an example of Stochastic Gradient Descent built of the SGFunction a .. 
code-block :: python from cil.optimisation.utilities import Sampler - from cil.optimisation.algorithms import GD + from cil.optimisation.algorithms import GD from cil.optimisation.functions import LeastSquares, SGFunction from cil.utilities import dataexample from cil.plugins.astra.operators import ProjectionOperator - - # get the data + + # get the data data = dataexample.SIMULATED_PARALLEL_BEAM_DATA.get() data.reorder('astra') data = data.get_slice(vertical='centre') - # create the geometries - ag = data.geometry + # create the geometries + ag = data.geometry ig = ag.get_ImageGeometry() # partition the data and build the projectors - n_subsets = 10 + n_subsets = 10 partitioned_data = data.partition(n_subsets, 'sequential') A_partitioned = ProjectionOperator(ig, partitioned_data.geometry, device = "cpu") - # create the list of functions for the stochastic sum + # create the list of functions for the stochastic sum list_of_functions = [LeastSquares(Ai, b=bi) for Ai,bi in zip(A_partitioned, partitioned_data)] - #define the sampler and the stochastic gradient function + #define the sampler and the stochastic gradient function sampler = Sampler.staggered(len(list_of_functions), stride=2) - f = SGFunction(list_of_functions, sampler=sampler) - - #set up and run the gradient descent algorithm + f = SGFunction(list_of_functions, sampler=sampler) + + #set up and run the gradient descent algorithm alg = GD(initial=ig.allocate(0), objective_function=f, step_size=1/f.L) alg.run(300) Note ---- - All the approximate gradients written in CIL are of a similar order of magnitude to the full gradient calculation. For example, in the :code:`SGFunction` we approximate the full gradient by :math:`n\nabla f_i` for an index :math:`i` given by the sampler. + All the approximate gradients written in CIL are of a similar order of magnitude to the full gradient calculation. For example, in the :code:`SGFunction` we approximate the full gradient by :math:`n\nabla f_i` for an index :math:`i` given by the sampler. The multiplication by :math:`n` is a choice to more easily allow comparisons between stochastic and non-stochastic methods and between stochastic methods with varying numbers of subsets. The multiplication ensures that the (SAGA, SGD, and SVRG and LSVRG) approximate gradients are an unbiased estimator of the full gradient ie :math:`\mathbb{E}\left[\tilde\nabla f(x)\right] =\nabla f(x)`. - This has an implication when choosing step sizes. For example, a suitable step size for GD with a SGFunction could be - :math:`\propto 1/(L_{max}*n)`, where :math:`L_{max}` is the largest Lipschitz constant of the list of functions in the SGFunction and the additional factor of :math:`n` reflects this multiplication by :math:`n` in the approximate gradient. + This has an implication when choosing step sizes. For example, a suitable step size for GD with a SGFunction could be + :math:`\propto 1/(L_{max}*n)`, where :math:`L_{max}` is the largest Lipschitz constant of the list of functions in the SGFunction and the additional factor of :math:`n` reflects this multiplication by :math:`n` in the approximate gradient. + - Memory requirements ------------------- Note that the approximate gradient methods have different memory requirements: + The `SGFunction` has the same requirements as a `SumFunction`, so no increased memory usage -+ `SAGFunction` and `SAGAFunction` both store `n+3` times the image size in memory to store the last calculated gradient for each function in the sum and for intermediary calculations. 
-+ `SVRGFunction` and `LSVRGFunction` with the default `store_gradients = False` store 4 times the image size in memory, including the "snapshot" point and gradient. If `store_gradients = True`, some computational effort is saved, at the expensive of stored memory `n+4` times the image size. ++ `SAGFunction` and `SAGAFunction` both store `n+3` times the image size in memory to store the last calculated gradient for each function in the sum and for intermediary calculations. ++ `SVRGFunction` and `LSVRGFunction` with the default `store_gradients = False` store 4 times the image size in memory, including the "snapshot" point and gradient. If `store_gradients = True`, some computational effort is saved, at the expensive of stored memory `n+4` times the image size. Operators @@ -542,7 +542,7 @@ Total variation :members: :inherited-members: -Function of Absolute Value +Function of Absolute Value -------------------------- .. autoclass:: cil.optimisation.functions.FunctionOfAbs @@ -550,47 +550,47 @@ Function of Absolute Value :inherited-members: -Approximate Gradient base class +Approximate Gradient base class -------------------------------- -.. autoclass:: cil.optimisation.functions.ApproximateGradientSumFunction +.. autoclass:: cil.optimisation.functions.ApproximateGradientSumFunction :members: :inherited-members: - -Stochastic Gradient function + +Stochastic Gradient function ----------------------------- -.. autoclass:: cil.optimisation.functions.SGFunction +.. autoclass:: cil.optimisation.functions.SGFunction :members: :inherited-members: SAG function ------------- -.. autoclass:: cil.optimisation.functions.SAGFunction +.. autoclass:: cil.optimisation.functions.SAGFunction :members: :inherited-members: SAGA function -------------- -.. autoclass:: cil.optimisation.functions.SAGAFunction +.. autoclass:: cil.optimisation.functions.SAGAFunction :members: :inherited-members: -Stochastic Variance Reduced Gradient Function +Stochastic Variance Reduced Gradient Function ---------------------------------------------- -.. autoclass:: cil.optimisation.functions.SVRGFunction +.. autoclass:: cil.optimisation.functions.SVRGFunction :members: :inherited-members: -Loopless Stochastic Variance Reduced Gradient Function +Loopless Stochastic Variance Reduced Gradient Function ---------------------------------------------- -.. autoclass:: cil.optimisation.functions.LSVRGFunction +.. autoclass:: cil.optimisation.functions.LSVRGFunction :members: :inherited-members: @@ -625,12 +625,12 @@ For ease of use we provide the following static methods in `cil.optimisation.uti They will all instantiate a Sampler defined in the following class: .. autoclass:: cil.optimisation.utilities.Sampler - + The random samplers are instantiated from a random sampling class which is a child class of `cil.optimisation.utilities.sampler` and provides options for sampling with and without replacement: .. autoclass:: cil.optimisation.utilities.SamplerRandom - + Callbacks --------- @@ -657,6 +657,12 @@ Built-in callbacks include: .. autoclass:: cil.optimisation.utilities.callbacks.LogfileCallback :members: +.. autoclass:: cil.optimisation.utilities.callbacks.CSVCallback + :members: + +.. autoclass:: cil.optimisation.utilities.callbacks.TimingCallback + :members: + .. 
autoclass:: cil.optimisation.utilities.callbacks.EarlyStoppingObjectiveValue :members: @@ -700,9 +706,9 @@ In each iteration of the :code:`TestAlgo`, the objective :math:`x` is reduced by 15%|███ | 3/20 [00:00<00:00, 11770.73it/s, objective=3.05e-5] -Step size methods +Step size methods ------------------ -A step size method is a class which acts on an algorithm and can be passed to `cil.optimisation.algorithm.GD`, `cil.optimisation.algorithm.ISTA` `cil.optimisation.algorithm.FISTA` and it's method `get_step_size` is called after the calculation of the gradient before the gradient descent step is taken. It outputs a float value to be used as the step-size. +A step size method is a class which acts on an algorithm and can be passed to `cil.optimisation.algorithm.GD`, `cil.optimisation.algorithm.ISTA` `cil.optimisation.algorithm.FISTA` and it's method `get_step_size` is called after the calculation of the gradient before the gradient descent step is taken. It outputs a float value to be used as the step-size. Currently in CIL we have a base class: @@ -724,7 +730,7 @@ We also have a number of example classes: Preconditioners ---------------- -A preconditioner is a class which acts on an algorithm and can be passed to `cil.optimisation.algorithm.GD`, `cil.optimisation.algorithm.ISTA` or `cil.optimisation.algorithm.FISTA` and it's method `apply` is called after the calculation of the gradient before the gradient descent step is taken. It modifies and returns a passed `gradient`. +A preconditioner is a class which acts on an algorithm and can be passed to `cil.optimisation.algorithm.GD`, `cil.optimisation.algorithm.ISTA` or `cil.optimisation.algorithm.FISTA` and it's method `apply` is called after the calculation of the gradient before the gradient descent step is taken. It modifies and returns a passed `gradient`. Currently in CIL we have a base class: From caf124af13906ab175e5c89c51015a83b21f02e2 Mon Sep 17 00:00:00 2001 From: Casper da Costa-Luis Date: Thu, 29 Jan 2026 10:17:23 +0000 Subject: [PATCH 5/5] fix tests --- Wrappers/Python/test/test_algorithms.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index 7908debf6..985dd1d20 100644 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -1665,11 +1665,10 @@ def old_callback(iteration, objective, solution): log = NamedTemporaryFile(delete=False) log.close() - algo.run(20, callbacks=[callbacks.LogfileCallback( - log.name)], callback=old_callback) + algo.run(20, callbacks=[callbacks.LogfileCallback(log.name, interval=5)], callback=old_callback) with open(log.name, 'r') as fd: self.assertListEqual( - ["64/83", "74/83", "83/83", ""], + ['64/83', '69/83', '74/83', '79/83', '83/83', ''], [line.lstrip().split(" ", 1)[0] for line in fd.readlines()]) unlink(log.name)
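
A minimal usage sketch of the pieces added in these patches (``Callback.interval``, ``CSVCallback``, ``TimingCallback``), assuming an already-configured CIL algorithm instance ``algo`` such as the ``GD`` object built in the Stochastic Gradient Descent Example in ``optimisation.rst``; the file name and interval values below are illustrative only. Note that ``Callback.skip_iteration`` caps the effective interval at the algorithm's ``update_objective_interval``.

.. code-block :: python

    from cil.optimisation.utilities import callbacks

    # `algo` is any configured cil Algorithm, e.g. the GD instance from the
    # Stochastic Gradient Descent Example above (assumed to exist here)

    # write (iteration, objective) rows to objectives.csv roughly every 10 iterations;
    # skip_iteration caps the effective interval at algo.update_objective_interval
    csv_cb = callbacks.CSVCallback(csv_file='objectives.csv', interval=10)

    # record per-iteration wall-clock times, excluding time spent in the nested CSV logging
    timing_cb = callbacks.TimingCallback(callbacks=[csv_cb])

    algo.run(300, callbacks=[callbacks.ProgressCallback(interval=10), timing_cb])

    print(timing_cb.times[:5])  # seconds per iteration, callback overhead excluded

Because ``TimingCallback`` subtracts the time spent inside its nested callbacks from its running offset, ``timing_cb.times`` reflects the algorithm's own per-iteration cost rather than the logging overhead.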