diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 28d0203f23..d4ba265013 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -27,10 +27,10 @@ jobs:
runs-on: [self-hosted, python, cuda]
strategy:
matrix:
- python-version: [3.11]
- numpy-version: [1.25]
+ python-version: [3.12]
+ numpy-version: [1.26]
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with: {fetch-depth: 0, submodules: recursive}
- id: reqs
name: set requirements
@@ -67,12 +67,12 @@ jobs:
matrix:
include:
- {python-version: '3.10', numpy-version: 1.23}
- - {python-version: 3.12, numpy-version: 1.26}
+ - {python-version: 3.12, numpy-version: 2}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with: {fetch-depth: 0, submodules: recursive}
- name: set requirements
- run: sed -ri -e '/ python /d' -e 's/(.* numpy) .*/\1=${{ matrix.numpy-version }}/' -e 's/=cuda*//' -e '/tigre/d' scripts/requirements-test.yml
+ run: sed -ri -e '/ python /d' -e 's/(.* numpy) .*/\1=${{ matrix.numpy-version }}/' -e 's/ cuda*//' -e '/tigre/d' scripts/requirements-test.yml
- uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
@@ -109,8 +109,8 @@ jobs:
fi
fi
if $include_max; then
- echo "include=[{'python-version': 3.12, 'os': 'ubuntu'}, {'python-version': 3.12, 'os': 'windows'}]" >> $GITHUB_OUTPUT
- echo "include-numpy=[{'python-version': 3.12, 'numpy-version': 1.26, 'os': 'ubuntu'}, {'python-version': 3.12, 'numpy-version': 1.26, 'os': 'windows'}]" >> $GITHUB_OUTPUT
+ echo "include=[{'python-version': 3.13, 'os': 'ubuntu'}, {'python-version': 3.13, 'os': 'windows'}]" >> $GITHUB_OUTPUT
+ echo "include-numpy=[{'python-version': 3.13, 'numpy-version': 2, 'os': 'ubuntu'}, {'python-version': 3.13, 'numpy-version': 2, 'os': 'windows'}]" >> $GITHUB_OUTPUT
else
echo "include=[]" >> $GITHUB_OUTPUT
echo "include-numpy=[]" >> $GITHUB_OUTPUT
@@ -125,7 +125,7 @@ jobs:
os: ${{ fromJson(needs.conda-matrix.outputs.os) }}
include: ${{ fromJson(needs.conda-matrix.outputs.include) }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
fetch-depth: 0
submodules: recursive
@@ -158,7 +158,7 @@ jobs:
os: ${{ fromJson(needs.conda-matrix.outputs.os) }}
include: ${{ fromJson(needs.conda-matrix.outputs.include-numpy) }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
@@ -166,7 +166,7 @@ jobs:
channels: conda-forge
conda-remove-defaults: "true"
- run: conda install boa
- - uses: actions/download-artifact@v4
+ - uses: actions/download-artifact@v5
with:
name: cil-py${{ matrix.python-version }}-${{ matrix.os }}
path: dist
@@ -180,13 +180,13 @@ jobs:
runs-on: ubuntu-latest
needs: conda-test
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: conda-incubator/setup-miniconda@v3
with:
mamba-version: "*"
channels: conda-forge
conda-remove-defaults: "true"
- - uses: actions/download-artifact@v4
+ - uses: actions/download-artifact@v5
with: {pattern: cil-py*-*, path: dist, merge-multiple: true}
- run: mamba install anaconda-client
- name: anaconda upload -c ccpi
@@ -205,7 +205,7 @@ jobs:
defaults: {run: {shell: 'bash -el {0}', working-directory: docs}}
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
fetch-depth: 0
submodules: recursive
@@ -229,7 +229,7 @@ jobs:
conda list
- run: pip install ..
- name: checkout docs
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
path: docs/build
ref: gh-pages
@@ -262,7 +262,7 @@ jobs:
contents: read
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
fetch-depth: 0
submodules: recursive
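The `sed` one-liners above (and the similar invocation in the Dockerfile below) rewrite `scripts/requirements-test.yml` in place: drop the pinned `python` and `tigre` lines, pin `numpy`, and strip ` cuda` build-string fragments. A rough Python sketch of the same transform, illustrative only (the helper name is hypothetical, not part of this diff):

```python
import re

# Illustrative analogue of the sed command above. Note the regex ' cuda*'
# matches ' cud' followed by zero or more 'a's, same as in the sed pattern.
def rewrite_requirements(lines, numpy_version="2"):
    out = []
    for line in lines:
        if " python " in line or "tigre" in line:
            continue  # sed: '/ python /d' and '/tigre/d'
        line = re.sub(r"(.* numpy) .*", rf"\g<1>={numpy_version}", line)  # pin numpy
        line = re.sub(r" cuda*", "", line, count=1)  # sed: 's/ cuda*//'
        out.append(line)
    return out

print(rewrite_requirements(["  - numpy 1.26", "  - tigre=2.6"]))  # ['  - numpy=2']
```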
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e37f618d58..052b27aec9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,10 @@
- Dependencies:
- olefile and dxchange are optional dependencies, instead of required (#2209)
- improve `tqdm` notebook support (#2241)
+ - Added support for numpy 2
+  - Updated to CCPi-Regularisation Toolkit 25.0.0
+ - Added support for python 3.13
+ - Added support for astra-toolbox 2.4
- Documentation:
- Render the user showcase notebooks in the documentation (#2189)
- Enhancements:
diff --git a/Dockerfile b/Dockerfile
index 5c508e0d71..d715e72423 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,13 +9,13 @@ LABEL org.opencontainers.image.source=https://github.com/TomographicImaging/CIL
LABEL org.opencontainers.image.licenses="Apache-2.0 AND BSD-3-Clause AND GPL-3.0"
# CUDA-specific packages
-ARG CIL_EXTRA_PACKAGES="tigre=2.6 astra-toolbox=2.1.0=cuda*"
+ARG CIL_EXTRA_PACKAGES="tigre=2.6 astra-toolbox::astra-toolbox=2.4=cuda* ccpi::ccpi-regulariser=25.0.0=cuda*"
# build & runtime dependencies
# TODO: sync scripts/create_local_env_for_cil_development.sh, scripts/requirements-test.yml, recipe/meta.yaml (e.g. missing libstdcxx-ng _openmp_mutex pip)?
# vis. https://github.com/TomographicImaging/CIL/pull/1590
COPY --chown="${NB_USER}" scripts/requirements-test.yml environment.yml
# channel_priority: https://stackoverflow.com/q/58555389
-RUN sed -ri '/tigre|astra-toolbox| python /d' environment.yml \
+RUN sed -ri -e '/tigre| python /d' -e 's/ cuda*//' environment.yml \
&& for pkg in 'jupyter-server-proxy>4.1.0' $CIL_EXTRA_PACKAGES; do echo " - $pkg" >> environment.yml; done \
&& conda config --env --set channel_priority strict \
&& for ch in defaults nvidia ccpi https://software.repos.intel.com/python/conda conda-forge; do conda config --env --add channels $ch; done \
diff --git a/README.md b/README.md
index 9a9149b869..dd0155b2e1 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Binary installation of CIL can be achieved with `conda`.
We recommend using either [`miniconda`](https://docs.conda.io/projects/miniconda/en/latest) or [`miniforge`](https://github.com/conda-forge/miniforge), which are both minimal installers for `conda`. We also recommend a `conda` version of at least `23.10` for quicker installation.
#### Create a conda environment with CIL
-We maintain an environment file with the required packages to run the [CIL demos](https://github.com/TomographicImaging/CIL-Demos) which you can use to create a new environment. This will have specific and tested versions of all dependencies that are outlined in the table below:
+We maintain an environment file with the required packages to run the [CIL demos](https://github.com/TomographicImaging/CIL-Demos), which you can use to create a new environment. This will have specific and tested versions of all dependencies that are outlined in the table below:
```sh
conda env create -f https://tomographicimaging.github.io/scripts/env/cil_demos.yml
@@ -47,13 +47,13 @@ While building the CIL package we test with specific versions of dependencies. T
| Package | Tested Version | Conda install command | Description | License |
|----|----|--------|--------|----|
-| [Python](https://www.python.org/) | 3.10 - 3.12 | `"python>=3.10,<=3.12"` || [PSF-2.0](https://docs.python.org/3/license.html) |
-| [Numpy](https://github.com/numpy/numpy) | 1.23 - 1.26 | `"numpy>=1.23,<2"` || [BSD-3-Clause](https://numpy.org/doc/stable/license.html) |
+| [Python](https://www.python.org/) | 3.10 - 3.13 | `"python>=3.10,<=3.13"` || [PSF-2.0](https://docs.python.org/3/license.html) |
+| [Numpy](https://github.com/numpy/numpy) | 1.23 - 2.3 | `"numpy>=1.23,<=2.3"` || [BSD-3-Clause](https://numpy.org/doc/stable/license.html) |
| [IPP](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ipp.html#gs.gxwq5p) | 2021.12 | `-c https://software.repos.intel.com/python/conda ipp=2021.12` | The Intel Integrated Performance Primitives Library (required for the CIL recon class). | [ISSL](http://www.intel.com/content/www/us/en/developer/articles/license/end-user-license-agreement.html) |
|--|--| **Optional dependencies** |--|--|
-| [ASTRA toolbox](http://www.astra-toolbox.com) | 2.1 | CPU: `conda-forge::astra-toolbox=2.1=py*`<br>GPU: `conda-forge::astra-toolbox=2.1=cuda*` | CT projectors, FBP and FDK. | [GPL-3.0](https://github.com/astra-toolbox/astra-toolbox/blob/master/COPYING) |
+| [ASTRA toolbox](http://www.astra-toolbox.com) | 2.1 - 2.4 | CPU: `astra-toolbox::astra-toolbox=2.4=py*`<br>GPU: `astra-toolbox::astra-toolbox=2.4=cuda*` | CT projectors, FBP and FDK. | [GPL-3.0](https://github.com/astra-toolbox/astra-toolbox/blob/master/COPYING) |
| [TIGRE](https://github.com/CERN/TIGRE) | 2.6 | `ccpi::tigre=2.6` | CT projectors, FBP and FDK. | [BSD-3-Clause](https://github.com/CERN/TIGRE/blob/master/LICENSE.txt) |
-| [CCPi Regularisation Toolkit](https://github.com/TomographicImaging/CCPi-Regularisation-Toolkit) | 24.0.1 | `ccpi::ccpi-regulariser=24.0.1` | Toolbox of regularisation methods. | [Apache-2.0](https://github.com/TomographicImaging/CCPi-Regularisation-Toolkit/blob/master/LICENSE) |
+| [CCPi Regularisation Toolkit](https://github.com/TomographicImaging/CCPi-Regularisation-Toolkit) | 25.0.0 | CPU: `ccpi::ccpi-regulariser=25.0.0=cpu*`<br>GPU: `ccpi::ccpi-regulariser=25.0.0=cuda*` | Toolbox of regularisation methods. | [Apache-2.0](https://github.com/TomographicImaging/CCPi-Regularisation-Toolkit/blob/master/LICENSE) |
| [TomoPhantom](https://github.com/dkazanc/TomoPhantom) | [2.0.0](https://github.com/dkazanc/TomoPhantom/releases/tag/v2.0.0) | `ccpi::tomophantom=2.0.0` | Generates phantoms to use as test data. | [Apache-2.0](https://github.com/dkazanc/TomoPhantom/blob/master/LICENSE) |
| [ipykernel](https://github.com/ipython/ipykernel) || `ipykernel` | Provides the IPython kernel to run Jupyter notebooks. | [BSD-3-Clause](https://github.com/ipython/ipykernel/blob/main/LICENSE) |
| [ipywidgets](https://github.com/jupyter-widgets/ipywidgets) || `ipywidgets` | Enables visualisation tools within jupyter notebooks. | [BSD-3-Clause](https://github.com/jupyter-widgets/ipywidgets/blob/main/LICENSE) |
@@ -62,7 +62,6 @@ While building the CIL package we test with specific versions of dependencies. T
|[olefile](https://github.com/decalage2/olefile)|>= 0.46|`olefile>=0.46`|Package to process Microsoft OLE2 files, used to read ZEISS data files.|[BSD-style (custom)](https://github.com/decalage2/olefile?tab=License-1-ov-file)|
|[dxchange](https://github.com/data-exchange/dxchange)||`dxchange`|Provides an interface with TomoPy for loading tomography data.|[BSD-style (custom)](https://github.com/data-exchange/dxchange?tab=License-1-ov-file)|
-
### Docker
Finally, CIL can be run via a Jupyter Notebook enabled Docker container:
diff --git a/Wrappers/Python/cil/framework/acquisition_data.py b/Wrappers/Python/cil/framework/acquisition_data.py
index 308f0d5462..741929f41b 100644
--- a/Wrappers/Python/cil/framework/acquisition_data.py
+++ b/Wrappers/Python/cil/framework/acquisition_data.py
@@ -27,7 +27,7 @@
class AcquisitionData(DataContainer, Partitioner):
"""
DataContainer for holding 2D or 3D sinogram
-
+
Parameters
----------
array : numpy.ndarray or DataContainer
@@ -36,10 +36,10 @@ class AcquisitionData(DataContainer, Partitioner):
If True, the array will be deep copied. If False, the array will be shallow copied.
geometry : AcquisitionGeometry
The geometry of the data. If the dtype of the array and geometry are different, the geometry dtype will be overridden.
-
+
**kwargs:
dtype : numpy.dtype
- Specify the data type of the AcquisitionData array, this is useful if you pass None to array and want to over-ride the dtype of the geometry.
+        Specify the data type of the AcquisitionData array. This is useful if you pass None to array and want to override the dtype of the geometry.
If an array is passed, dtype must match the dtype of the array.
"""
@@ -84,7 +84,7 @@ def __init__(self,
if dtype is None:
dtype = geometry.dtype
array = numpy.empty(geometry.shape, dtype)
-
+
elif issubclass(type(array) , DataContainer):
array = array.as_array()
@@ -107,12 +107,12 @@ def __eq__(self, other):
'''
Check if two AcquisitionData objects are equal. This is done by checking if the geometry, data and dtype are equal.
Also, if the other object is a numpy.ndarray, it will check if the data and dtype are equal.
-
+
Parameters
----------
other: AcquisitionData or numpy.ndarray
The object to compare with.
-
+
Returns
-------
bool
@@ -123,12 +123,12 @@ def __eq__(self, other):
if numpy.array_equal(self.as_array(), other.as_array()) \
and self.geometry == other.geometry \
and self.dtype == other.dtype:
- return True
+ return True
elif numpy.array_equal(self.as_array(), other) and self.dtype==other.dtype:
return True
else:
return False
-
+
def _get_slice(self, **kwargs):
'''
Functionality of get_slice
@@ -165,8 +165,8 @@ def _get_slice(self, **kwargs):
return out
else:
return AcquisitionData(out.array, deep_copy=False, geometry=geometry_new)
-
-
+
+
def get_slice(self, *args, **kwargs):
'''
Returns a new AcquisitionData of a single slice in the requested direction.
@@ -205,10 +205,10 @@ def get_slice(self, *args, **kwargs):
raise TypeError(f"Cannot use 'projection' for geometries that use angles. Use 'angle' instead.")
elif key not in AcquisitionDimension and key != 'force':
raise TypeError(f"'{key}' not in allowed labels {AcquisitionDimension}.")
-
+
if args:
warnings.warn("Positional arguments for get_slice are deprecated. Use keyword arguments instead.", DeprecationWarning, stacklevel=2)
-
+
num_args = len(args)
if num_args > 0:
diff --git a/Wrappers/Python/cil/framework/block.py b/Wrappers/Python/cil/framework/block.py
index 40bdd8cdb9..e7b40f9b72 100644
--- a/Wrappers/Python/cil/framework/block.py
+++ b/Wrappers/Python/cil/framework/block.py
@@ -733,9 +733,7 @@ def __neg__(self):
return -1 * self
def dot(self, other):
-#
- tmp = [ self.containers[i].dot(other.containers[i]) for i in range(self.shape[0])]
- return sum(tmp)
+ return sum(self.containers[i].dot(other.containers[i]) for i in range(self.shape[0]))
def __len__(self):
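The `dot` refactor above is behaviour-preserving: the dot product of two block containers is the sum of the componentwise dots. A minimal plain-NumPy sketch of that contract (standalone, not the CIL classes):

```python
import numpy as np

# Block dot product: sum of the component dot products, as in the diff above.
def block_dot(blocks_a, blocks_b):
    return sum(a @ b for a, b in zip(blocks_a, blocks_b))

x = [np.array([1.0, 2.0]), np.array([3.0])]
y = [np.array([4.0, 5.0]), np.array([6.0])]
assert block_dot(x, y) == 1*4 + 2*5 + 3*6  # 32
```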
diff --git a/Wrappers/Python/cil/framework/data_container.py b/Wrappers/Python/cil/framework/data_container.py
index f7321550e4..2614862f19 100644
--- a/Wrappers/Python/cil/framework/data_container.py
+++ b/Wrappers/Python/cil/framework/data_container.py
@@ -219,49 +219,49 @@ def reorder(self, order):
self.geometry.set_labels(dimension_labels_new)
def fill(self, array, **kwargs):
- '''Fills the internal data array with a DataContainer, numpy array, number
+ '''Fills the internal data array with a DataContainer, numpy array, number
or using a random fill method
Parameters
----------
array : DataContainer, numpy array, number or string
- The value or method with which to fill the DataContainer. Accepts a
+ The value or method with which to fill the DataContainer. Accepts a
numpy array or DataContainer or a number to allocate a uniform array
- or a string specifying a method to fill with a random array: 'random'
- allocates floats between 0 and 1, 'random_int' by default allocates
+ or a string specifying a method to fill with a random array: 'random'
+ allocates floats between 0 and 1, 'random_int' by default allocates
integers between 0 and 100 or between provided `min_value` and `max_value`.
**kwargs:
- **dimension : int, optional
- An optional named parameter to specify in which axis the fill should
- happen. The name passed must match one of the DataContainer
+ **dimension : int, optional
+ An optional named parameter to specify in which axis the fill should
+ happen. The name passed must match one of the DataContainer
dimension_labels and the axis must be an int
seed : int, optional
- A random seed to fix reproducibility, only used if `array` is a
+ A random seed to fix reproducibility, only used if `array` is a
`random method`. Default is `None`.
min_value : int, optional, default=0
- The minimum value random integer to generate, only used if `array`
+        The minimum random integer to generate, only used if `array`
        is 'random_int'. New since version 25.0.0. Default is 0.
max_value : int, optional, default=100
- The maximum value random integer to generate, only used if `array`
+        The maximum random integer to generate, only used if `array`
is 'random_int'. Default is 100.
-
+
Note
----
- If the passed numpy array points to the same array that is contained
+ If the passed numpy array points to the same array that is contained
in the DataContainer, the DataContainer is not updated, and None is returned.
Note
----
- Since v25.0.0 the methods used by 'random' or 'random_int' use `numpy.random.default_rng`.
- This method does not use the global numpy.random.seed() so if a seed is
+ Since v25.0.0 the methods used by 'random' or 'random_int' use `numpy.random.default_rng`.
+    These methods do not use the global numpy.random.seed(), so if a seed is
required it should be passed directly as a kwarg.
- To fill random numbers using the earlier behaviour use `array='random_deprecated'`
- or `array='random_int_deprecated'`
-
+    To fill random numbers using the earlier behaviour, use `array='random_deprecated'`
+    or `array='random_int_deprecated'`.
+
Example
-------
This example shows how to pass a dimension to specify an axis to fill
@@ -277,7 +277,7 @@ def fill(self, array, **kwargs):
dimension_labels = self.dimension_labels
except AttributeError:
dimension_labels = None
-
+
if dimension_labels is not None:
for k in list(kwargs):
if k in self.dimension_labels:
@@ -288,37 +288,37 @@ def fill(self, array, **kwargs):
for k, v in dimension.items():
i = dimension_labels.index(k)
- index[i] = v
+ index[i] = v
index = tuple(index)
else:
index = (slice(None),) * self.array.ndim
if id(array) == id(self.array):
return
-
+
if isinstance(array, numpy.ndarray):
numpy.copyto(self.array[index], array)
-
+
elif isinstance(array, Number):
self.array[index] = array
-
+
elif issubclass(array.__class__ , DataContainer):
if dimension_labels is not None:
indexed_dimension_labels = [label for label in self.dimension_labels if label not in dimension]
if tuple(indexed_dimension_labels) != array.dimension_labels:
raise ValueError('Input array is not in the same order as destination array. Use "array.reorder()"')
-
+
if self.array[index].shape == array.shape:
numpy.copyto(self.array[index], array.array)
else:
raise ValueError('Cannot fill with the provided array.' + \
'Expecting shape {0} got {1}'.format(
self.array[index].shape, array.shape))
-
+
elif array in FillType:
seed = kwargs.pop("seed", None)
-
+
if array == FillType.RANDOM_DEPRECATED:
warnings.warn("RANDOM_DEPRECATED will be removed in a future version", DeprecationWarning, stacklevel=2)
if seed is not None:
@@ -328,7 +328,7 @@ def fill(self, array, **kwargs):
else:
r = numpy.random.random_sample(self.array[index].shape).astype(self.dtype)
self.array[index] = r
-
+
elif array == FillType.RANDOM_INT_DEPRECATED:
warnings.warn("RANDOM_INT_DEPRECATED will be removed in a future version", DeprecationWarning, stacklevel=2)
if seed is not None:
@@ -342,7 +342,7 @@ def fill(self, array, **kwargs):
self.array[index] = r
elif array == FillType.RANDOM:
-
+
rng = numpy.random.default_rng(seed)
if numpy.issubdtype(self.dtype, numpy.complexfloating):
complex_example = numpy.array([1 + 1j], dtype=self.dtype)
@@ -353,7 +353,7 @@ def fill(self, array, **kwargs):
self.array[index] = r
elif array == FillType.RANDOM_INT:
-
+
rng = numpy.random.default_rng(seed)
max_value = kwargs.pop("max_value", 100)
min_value = kwargs.pop("min_value", 0)
@@ -411,7 +411,7 @@ def __rtruediv__(self, other):
def __rpow__(self, other):
if isinstance(other, Number) :
- fother = numpy.ones(numpy.shape(self.array)) * other
+ fother = numpy.ones_like(self.array) * self.dtype.type(other)
return type(self)(fother ** self.array ,
dimension_labels=self.dimension_labels,
geometry=self.geometry)
@@ -492,19 +492,19 @@ def pixel_wise_binary(self, pwop, x2, *args, **kwargs):
if out is None:
if isinstance(x2, Number):
- out = pwop(self.as_array() , x2 , *args, **kwargs )
+ out = pwop(self.as_array(), self.dtype.type(x2), *args, **kwargs)
elif issubclass(x2.__class__ , DataContainer):
out = pwop(self.as_array() , x2.as_array() , *args, **kwargs )
elif isinstance(x2, numpy.ndarray):
out = pwop(self.as_array() , x2 , *args, **kwargs )
else:
raise TypeError('Expected x2 type as number or DataContainer, got {}'.format(type(x2)))
-
+
if self.geometry is None:
return type(self)(out,
deep_copy=False,
dimension_labels=self.dimension_labels)
-
+
return type(self)(out,
deep_copy=False,
geometry=self.geometry)
@@ -527,7 +527,7 @@ def pixel_wise_binary(self, pwop, x2, *args, **kwargs):
not (x2.shape == self.shape and x2.dtype == self.dtype):
raise ValueError(f"Wrong size for data memory: out {out.shape} x2 {x2.shape} expected {self.shape}")
kwargs['out']=out.as_array()
- pwop(self.as_array(), x2, *args, **kwargs )
+ pwop(self.as_array(), self.dtype.type(x2) if isinstance(x2, Number) else x2, *args, **kwargs )
return out
raise ValueError(f"Wrong size for data memory: {out.shape} {self.shape}")
elif issubclass(type(out), numpy.ndarray):
@@ -736,11 +736,11 @@ def pixel_wise_unary(self, pwop, *args, **kwargs):
return type(self)(out,
deep_copy=False,
dimension_labels=self.dimension_labels)
-
+
return type(self)(out,
deep_copy=False,
geometry=self.geometry)
-
+
elif issubclass(type(out), DataContainer):
if self.check_dimensions(out):
kwargs['out'] = out.as_array()
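The scalar casts introduced above (`self.dtype.type(other)`, `numpy.ones_like`) exist to stop binary operations from silently promoting `float32` containers to `float64` under NumPy 2's NEP 50 promotion rules. A small demonstration, independent of CIL:

```python
import numpy as np

# NumPy 2 (NEP 50): Python scalars adopt the array dtype, typed scalars do not.
a = np.ones(3, dtype=np.float32)
print((a * 2.5).dtype)                # float32 - Python scalar is "weak"
print((a * np.float64(2.5)).dtype)    # float64 - typed scalar forces promotion
print((a * a.dtype.type(2.5)).dtype)  # float32 - the cast used in this diff
```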
diff --git a/Wrappers/Python/cil/io/TIFF.py b/Wrappers/Python/cil/io/TIFF.py
index 70359f5cec..078bdeabc4 100644
--- a/Wrappers/Python/cil/io/TIFF.py
+++ b/Wrappers/Python/cil/io/TIFF.py
@@ -533,8 +533,8 @@ def _read_as(self, geometry):
'''reads the TIFF stack as an ImageData with the provided geometry'''
data = self.read()
if len(geometry.shape) == 4:
- gsize = functools.reduce(lambda x,y: x*y, geometry.shape, 1)
- dsize = functools.reduce(lambda x,y: x*y, data.shape, 1)
+ gsize = np.prod(geometry.shape)
+ dsize = np.prod(data.shape)
if gsize != dsize:
added_dims = len(geometry.shape) - len(data.shape)
if data.shape[0] != functools.reduce(lambda x,y: x*y, geometry.shape[:1+added_dims], 1):
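The `np.prod` substitution above is a drop-in for the `functools.reduce` product over a shape tuple; a quick equivalence check:

```python
import functools
import numpy as np

# The reduce-based product replaced above and np.prod agree on any shape.
shape = (2, 3, 4, 5)
assert functools.reduce(lambda x, y: x * y, shape, 1) == np.prod(shape) == 120
```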
diff --git a/Wrappers/Python/cil/optimisation/algorithms/PD3O.py b/Wrappers/Python/cil/optimisation/algorithms/PD3O.py
index 04ebf9570e..9547bad431 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PD3O.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PD3O.py
@@ -1,231 +1,230 @@
-# Copyright 2024 United Kingdom Research and Innovation
-# Copyright 2024 The University of Manchester
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Authors:
-# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
-
-
-from cil.optimisation.algorithms import Algorithm
-from cil.optimisation.functions import ZeroFunction, ScaledFunction, SGFunction, SVRGFunction, LSVRGFunction, SAGAFunction, SAGFunction, ApproximateGradientSumFunction
-import logging
-import warnings
-
-class FunctionWrappingForPD3O():
- """
- An internal class that wraps the functions, :math:`f`, in PD3O to allow :math:`f` to be an `ApproximateGradientSumFunction`.
-
- Note that currently :math:`f` can be any type of deterministic function, an `ApproximateGradientSumFunction` or a scaled `ApproximateGradientSumFunction` but this is not set up to work for a `SumFunction` or `TranslatedFunction` which contains `ApproximateGradientSumFunction`s.
-
- Parameters
- ----------
- f : function
- The function :math:`f` to use in PD3O
- """
- def __init__(self, f):
- self.f = f
- self.scalar = 1
- while isinstance(self.f, ScaledFunction):
- self.scalar *= self.f.scalar
- self.f = self.f.function
-
- self._gradient_call_index = 0
-
- if isinstance(self.f, (SVRGFunction, LSVRGFunction)):
- self.gradient = self.svrg_gradient
- elif isinstance(self.f, ApproximateGradientSumFunction):
- self.gradient = self.approximate_sum_function_gradient
- else:
- self.gradient = f.gradient
-
- def svrg_gradient(self, x, out=None):
- if self._gradient_call_index == 0:
- self._gradient_call_index += 1
- self.f.gradient(x, out)
- else:
- if len(self.f.data_passes_indices[-1]) == self.f.sampler.num_indices:
- self.f._update_full_gradient_and_return(x, out=out)
- else:
- self.f.approximate_gradient( x, self.f.function_num, out=out)
- self.f._data_passes_indices.pop(-1)
- self._gradient_call_index = 0
-
- if self.scalar != 1:
- out *= self.scalar
- return out
-
- def approximate_sum_function_gradient(self, x, out=None):
- if self._gradient_call_index == 0:
- self._gradient_call_index += 1
- self.f.gradient(x, out)
- else:
- self.f.approximate_gradient( x, self.f.function_num, out=out)
- self._gradient_call_index = 0
-
- if self.scalar != 1:
- out *= self.scalar
- return out
-
-
- def __call__(self, x):
- return self.scalar * self.f(x)
-
- @property
- def L(self):
- return abs(self.scalar) * self.f.L
-
-
-class PD3O(Algorithm):
-
-
- r"""Primal Dual Three Operator Splitting (PD3O) algorithm, see "A New Primal–Dual Algorithm for Minimizing the Sum
- of Three Functions with a Linear Operator". This is a primal dual algorithm for minimising :math:`f(x)+g(x)+h(Ax)` where all functions are proper, lower semi-continuous and convex,
- :math:`f` should be differentiable with a Lipschitz continuous gradient and :math:`A` is a bounded linear operator.
-
- Parameters
- ----------
- f : Function
- A smooth function with Lipschitz continuous gradient.
- g : Function
- A convex function with a computationally computable proximal.
- h : Function
- A composite convex function.
- operator: Operator
- Bounded linear operator
- delta: Float, optional, default is `1./(gamma*operator.norm()**2)`
- The dual step-size
- gamma: Float, optional, default is `2.0/f.L`
- The primal step size
- initial : DataContainer, optional default is a container of zeros, in the domain of the operator
- Initial point for the algorithm.
-
- Note
- -----
- Note that currently :math:`f` in PD3O can be any type of deterministic function, an `ApproximateGradientSumFunction` or a scaled `ApproximateGradientSumFunction`.
-
- Note
- -----
- We have not implemented or tested when :math:`f` is a stochastic function (`ApproximateGradientSumFunction`) wrapped as a `SumFunction` or `TranslatedFunction`.
-
- Reference
- ---------
- Yan, M. A New Primal–Dual Algorithm for Minimizing the Sum of Three Functions with a Linear Operator. J Sci Comput 76, 1698–1717 (2018). https://doi.org/10.1007/s10915-018-0680-3
- """
-
-
- def __init__(self, f, g, h, operator, delta=None, gamma=None, initial=None, **kwargs):
-
- super(PD3O, self).__init__(**kwargs)
-
-
- self.set_up(f=f, g=g, h=h, operator=operator, delta=delta, gamma=gamma, initial=initial, **kwargs)
-
-
- def set_up(self, f, g, h, operator, delta=None, gamma=None, initial=None,**kwargs):
-
- logging.info("{} setting up".format(self.__class__.__name__, ))
-
- if isinstance(f, ZeroFunction):
- warnings.warn(" If f is the ZeroFunction, then PD3O = PDHG. Please use PDHG instead. Otherwise, select a relatively small parameter gamma ", UserWarning)
- if gamma is None:
- gamma = 1.0/operator.norm()
-
- self.f = FunctionWrappingForPD3O(f)
- self.g = g # proximable
- self.h = h # composite
- self.operator = operator
-
- if gamma is None:
- gamma = 0.99*2.0/self.f.L
-
- if delta is None :
- delta = 0.99/(gamma*self.operator.norm()**2)
-
- self.gamma = gamma
- self.delta = delta
-
- if initial is None:
- self.x = self.operator.domain_geometry().allocate(0)
- else:
- self.x = initial.copy()
-
- self.x_old = self.x.copy()
-
- self.s_old = self.operator.range_geometry().allocate(0)
- self.s = self.operator.range_geometry().allocate(0)
-
- self.grad_f = self.operator.domain_geometry().allocate(0)
-
- self.configured = True
- logging.info("{} configured".format(self.__class__.__name__, ))
-
- # initial proximal conjugate step
- self.operator.direct(self.x_old, out=self.s)
- self.s_old.sapyb(1, self.s, self.delta, out=self.s_old)
- self.h.proximal_conjugate(self.s_old, self.delta, out=self.s)
-
-
- def update(self):
- r""" Performs a single iteration of the PD3O algorithm
- """
-
- # Following equations 4 in https://link.springer.com/article/10.1007/s10915-018-0680-3
- # in this case order of proximal steps we recover the (primal) PDHG, when f=0
-
-
- tmp = self.x_old
- self.x_old = self.x
- self.x = tmp
-
-
- # proximal step
- self.f.gradient(self.x_old, out=self.grad_f)
- self.x_old.sapyb(1., self.grad_f, -self.gamma, out = self.grad_f) # x_old - gamma * grad_f(x_old)
- self.operator.adjoint(self.s, out=self.x_old)
- self.x_old.sapyb(-self.gamma, self.grad_f, 1.0, out=self.x_old)
- self.g.proximal(self.x_old, self.gamma, out = self.x)
-
- # update step
- self.f.gradient(self.x, out=self.x_old)
- self.x_old *= self.gamma
- self.grad_f += self.x_old
- self.x.sapyb(2, self.grad_f, -1.0, out=self.x_old) # 2*x - x_old + gamma*(grad_f_x_old) - gamma*(grad_f_x)
-
- tmp = self.s_old
- self.s_old = self.s
- self.s = tmp
-
- # proximal conjugate step
- self.operator.direct(self.x_old, out=self.s)
- self.s_old.sapyb(1, self.s, self.delta, out=self.s_old)
- self.h.proximal_conjugate(self.s_old, self.delta, out=self.s)
-
-
-
-
-
- def update_objective(self):
- """
- Evaluates the primal objective
- """
- self.operator.direct(self.x, out=self.s_old)
- fun_h = self.h(self.s_old)
- fun_g = self.g(self.x)
- fun_f = self.f(self.x)
- p1 = fun_f + fun_g + fun_h
-
- self.loss.append(p1)
-
-
-
\ No newline at end of file
+# Copyright 2024 United Kingdom Research and Innovation
+# Copyright 2024 The University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
+
+
+from cil.optimisation.algorithms import Algorithm
+from cil.optimisation.functions import ZeroFunction, ScaledFunction, SVRGFunction, LSVRGFunction, ApproximateGradientSumFunction
+from cil.utilities import dtype_like
+import logging
+import warnings
+
+class FunctionWrappingForPD3O():
+ """
+ An internal class that wraps the functions, :math:`f`, in PD3O to allow :math:`f` to be an `ApproximateGradientSumFunction`.
+
+    Note that currently :math:`f` can be any type of deterministic function, an `ApproximateGradientSumFunction` or a scaled `ApproximateGradientSumFunction`, but this is not set up to work for a `SumFunction` or `TranslatedFunction` which contains `ApproximateGradientSumFunction`s.
+
+ Parameters
+ ----------
+ f : function
+ The function :math:`f` to use in PD3O
+ """
+ def __init__(self, f):
+ self.f = f
+ self.scalar = 1
+ while isinstance(self.f, ScaledFunction):
+ self.scalar *= self.f.scalar
+ self.f = self.f.function
+
+ self._gradient_call_index = 0
+
+ if isinstance(self.f, (SVRGFunction, LSVRGFunction)):
+ self.gradient = self.svrg_gradient
+ elif isinstance(self.f, ApproximateGradientSumFunction):
+ self.gradient = self.approximate_sum_function_gradient
+ else:
+ self.gradient = f.gradient
+
+ def svrg_gradient(self, x, out=None):
+ if self._gradient_call_index == 0:
+ self._gradient_call_index += 1
+ self.f.gradient(x, out)
+ else:
+ if len(self.f.data_passes_indices[-1]) == self.f.sampler.num_indices:
+ self.f._update_full_gradient_and_return(x, out=out)
+ else:
+                self.f.approximate_gradient(x, self.f.function_num, out=out)
+ self.f._data_passes_indices.pop(-1)
+ self._gradient_call_index = 0
+
+ if self.scalar != 1:
+ out *= self.scalar
+ return out
+
+ def approximate_sum_function_gradient(self, x, out=None):
+ if self._gradient_call_index == 0:
+ self._gradient_call_index += 1
+ self.f.gradient(x, out)
+ else:
+            self.f.approximate_gradient(x, self.f.function_num, out=out)
+ self._gradient_call_index = 0
+
+ if self.scalar != 1:
+ out *= self.scalar
+ return out
+
+
+ def __call__(self, x):
+ res = self.f(x)
+ return dtype_like(self.scalar, res) * res
+
+ @property
+ def L(self):
+ return abs(self.scalar) * self.f.L
+
+
+class PD3O(Algorithm):
+
+
+ r"""Primal Dual Three Operator Splitting (PD3O) algorithm, see "A New Primal–Dual Algorithm for Minimizing the Sum
+ of Three Functions with a Linear Operator". This is a primal dual algorithm for minimising :math:`f(x)+g(x)+h(Ax)` where all functions are proper, lower semi-continuous and convex,
+ :math:`f` should be differentiable with a Lipschitz continuous gradient and :math:`A` is a bounded linear operator.
+
+ Parameters
+ ----------
+ f : Function
+ A smooth function with Lipschitz continuous gradient.
+ g : Function
+        A convex function with a computationally tractable proximal operator.
+ h : Function
+ A composite convex function.
+ operator: Operator
+ Bounded linear operator
+    delta: Float, optional, default is `0.99/(gamma*operator.norm()**2)`
+        The dual step-size
+    gamma: Float, optional, default is `0.99*2.0/f.L`
+        The primal step-size
+    initial : DataContainer, optional, default is a container of zeros in the domain of the operator
+        Initial point for the algorithm.
+
+ Note
+ -----
+ Note that currently :math:`f` in PD3O can be any type of deterministic function, an `ApproximateGradientSumFunction` or a scaled `ApproximateGradientSumFunction`.
+
+ Note
+ -----
+ We have not implemented or tested when :math:`f` is a stochastic function (`ApproximateGradientSumFunction`) wrapped as a `SumFunction` or `TranslatedFunction`.
+
+ Reference
+ ---------
+ Yan, M. A New Primal–Dual Algorithm for Minimizing the Sum of Three Functions with a Linear Operator. J Sci Comput 76, 1698–1717 (2018). https://doi.org/10.1007/s10915-018-0680-3
+ """
+
+
+ def __init__(self, f, g, h, operator, delta=None, gamma=None, initial=None, **kwargs):
+
+ super(PD3O, self).__init__(**kwargs)
+
+
+ self.set_up(f=f, g=g, h=h, operator=operator, delta=delta, gamma=gamma, initial=initial, **kwargs)
+
+
+ def set_up(self, f, g, h, operator, delta=None, gamma=None, initial=None,**kwargs):
+
+ logging.info("{} setting up".format(self.__class__.__name__, ))
+
+ if isinstance(f, ZeroFunction):
+            warnings.warn("If f is the ZeroFunction, then PD3O = PDHG. Please use PDHG instead. Otherwise, select a relatively small parameter gamma.", UserWarning)
+ if gamma is None:
+ gamma = 1.0/operator.norm()
+
+ self.f = FunctionWrappingForPD3O(f)
+ self.g = g # proximable
+ self.h = h # composite
+ self.operator = operator
+
+ if gamma is None:
+ gamma = 0.99*2.0/self.f.L
+
+ if delta is None :
+ delta = 0.99/(gamma*self.operator.norm()**2)
+
+ self.gamma = gamma
+ self.delta = delta
+
+ if initial is None:
+ self.x = self.operator.domain_geometry().allocate(0)
+ else:
+ self.x = initial.copy()
+
+ self.x_old = self.x.copy()
+
+ self.s_old = self.operator.range_geometry().allocate(0)
+ self.s = self.operator.range_geometry().allocate(0)
+
+ self.grad_f = self.operator.domain_geometry().allocate(0)
+
+ self.configured = True
+ logging.info("{} configured".format(self.__class__.__name__, ))
+
+ # initial proximal conjugate step
+ self.operator.direct(self.x_old, out=self.s)
+ self.s_old.sapyb(1, self.s, self.delta, out=self.s_old)
+ self.h.proximal_conjugate(self.s_old, self.delta, out=self.s)
+
+
+ def update(self):
+ r""" Performs a single iteration of the PD3O algorithm
+ """
+
+        # Following equation 4 in https://link.springer.com/article/10.1007/s10915-018-0680-3
+        # with this ordering of the proximal steps, we recover the (primal) PDHG when f=0
+
+
+ tmp = self.x_old
+ self.x_old = self.x
+ self.x = tmp
+
+
+ # proximal step
+ self.f.gradient(self.x_old, out=self.grad_f)
+ self.x_old.sapyb(1., self.grad_f, -self.gamma, out = self.grad_f) # x_old - gamma * grad_f(x_old)
+ self.operator.adjoint(self.s, out=self.x_old)
+ self.x_old.sapyb(-self.gamma, self.grad_f, 1.0, out=self.x_old)
+ self.g.proximal(self.x_old, self.gamma, out = self.x)
+
+ # update step
+ self.f.gradient(self.x, out=self.x_old)
+ self.x_old *= self.gamma
+ self.grad_f += self.x_old
+ self.x.sapyb(2, self.grad_f, -1.0, out=self.x_old) # 2*x - x_old + gamma*(grad_f_x_old) - gamma*(grad_f_x)
+
+ tmp = self.s_old
+ self.s_old = self.s
+ self.s = tmp
+
+ # proximal conjugate step
+ self.operator.direct(self.x_old, out=self.s)
+ self.s_old.sapyb(1, self.s, self.delta, out=self.s_old)
+ self.h.proximal_conjugate(self.s_old, self.delta, out=self.s)
+
+
+ def update_objective(self):
+ """
+ Evaluates the primal objective
+ """
+ self.operator.direct(self.x, out=self.s_old)
+ fun_h = self.h(self.s_old)
+ fun_g = self.g(self.x)
+ fun_f = self.f(self.x)
+ p1 = fun_f + fun_g + fun_h
+
+ self.loss.append(p1)
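`FunctionWrappingForPD3O` peels nested `ScaledFunction` layers into a single accumulated scalar before dispatching on the innermost function's type. A minimal standalone sketch of that unwrapping logic (plain Python stand-ins, not the CIL classes):

```python
# Standalone sketch of the ScaledFunction unwrapping in FunctionWrappingForPD3O.
class Scaled:
    def __init__(self, scalar, function):
        self.scalar = scalar
        self.function = function

def unwrap(f):
    scalar = 1
    while isinstance(f, Scaled):  # peel nested scalings, e.g. 2 * (3 * f)
        scalar *= f.scalar
        f = f.function
    return scalar, f

base = object()
scalar, inner = unwrap(Scaled(2, Scaled(3, base)))
assert scalar == 6 and inner is base
```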
diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 04545bde1d..a3931a822b 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -54,7 +54,7 @@ class PDHG(Algorithm):
update_objective_interval : :obj:`int`, optional, default=1
Evaluates objectives, e.g., primal/dual/primal-dual gap every ``update_objective_interval``.
check_convergence : :obj:`boolean`, default=True
- Checks scalar sigma and tau values satisfy convergence criterion and warns if not satisfied. Can be computationally expensive for custom sigma or tau values.
+ Checks scalar sigma and tau values satisfy convergence criterion and warns if not satisfied. Can be computationally expensive for custom sigma or tau values.
theta : Float between 0 and 1, default 1.0
Relaxation parameter for the over-relaxation of the primal variable.
@@ -62,7 +62,7 @@ class PDHG(Algorithm):
Example
-------
- In our CIL-Demos repository (https://github.com/TomographicImaging/CIL-Demos) you can find examples using the PDHG algorithm for different imaging problems, such as Total Variation denoising, Total Generalised Variation inpainting
+ In our CIL-Demos repository (https://github.com/TomographicImaging/CIL-Demos) you can find examples using the PDHG algorithm for different imaging problems, such as Total Variation denoising, Total Generalised Variation inpainting
and Total Variation Tomography reconstruction. More examples can also be found in :cite:`Jorgensen_et_al_2021`, :cite:`Papoutsellis_et_al_2021`.
Note
@@ -126,7 +126,7 @@ class PDHG(Algorithm):
\tau \sigma \|K\|^2 < 4/3
For reference, see Li, Y. and Yan, M., 2022. On the improved conditions for some primal-dual algorithms. arXiv preprint arXiv:2201.00139.
-
+
- By default, the step sizes :math:`\sigma` and :math:`\tau` are positive scalars and defined as below:
* If ``sigma`` is ``None`` and ``tau`` is ``None``:
@@ -326,11 +326,11 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None):
Step size for the primal problem.
initial : `DataContainer`, or `list` or `tuple` of `DataContainer`s, optional, default is a DataContainer of zeros for both primal and dual variables
Initial point for the PDHG algorithm. If just one data container is provided, it is used for the primal and the dual variable is initialised as zeros. If a list or tuple is passed, the first element is used for the primal variable and the second one for the dual variable. If either of the two is not provided, it is initialised as a DataContainer of zeros.
-
- """
+
+ """
log.info("%s setting up", self.__class__.__name__)
-
-
+
+
# Triplet (f, g, K)
self.f = f
self.g = g
@@ -346,19 +346,19 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None):
self.x_old = initial[0].copy()
else:
self.x_old = self.operator.domain_geometry().allocate(0)
-
+
if len(initial) > 1 and initial[1] is not None:
self.y = initial[1].copy()
else:
self.y = self.operator.range_geometry().allocate(0)
- else:
+ else:
self.y = self.operator.range_geometry().allocate(0)
if initial is None:
self.x_old = self.operator.domain_geometry().allocate(0)
else:
self.x_old = initial.copy()
-
+
self.x = self.x_old.copy()
self.x_tmp = self.operator.domain_geometry().allocate(0)
self.y_tmp = self.operator.range_geometry().allocate(0)
@@ -418,11 +418,11 @@ def check_convergence(self):
-------
Boolean
True if convergence criterion is satisfied. False if not satisfied or convergence is unknown.
-
+
Reference
----------
Li, Y. and Yan, M., 2022. On the improved conditions for some primal-dual algorithms. arXiv preprint arXiv:2201.00139.
-
+
"""
if isinstance(self.tau, Number) and isinstance(self.sigma, Number):
if self.sigma * self.tau * self.operator.norm()**2 > 4/3:
@@ -468,17 +468,17 @@ def set_step_sizes(self, sigma=None, tau=None):
# Default sigma and tau step-sizes
if tau is None and sigma is None:
- self._sigma = 1.0/self.operator.norm()
- self._tau = 1.0/self.operator.norm()
+ self._sigma = 1/self.operator.norm()
+ self._tau = 1/self.operator.norm()
elif tau is not None and sigma is not None:
self._sigma = sigma
self._tau = tau
elif sigma is None and isinstance(tau, Number):
- self._sigma = 1./(tau*self.operator.norm()**2)
+ self._sigma = 1/(tau*self.operator.norm()**2)
self._tau = tau
elif tau is None and isinstance(sigma, Number):
self._sigma = sigma
- self._tau = 1./(self.sigma*self.operator.norm()**2)
+ self._tau = 1/(self.sigma*self.operator.norm()**2)
else:
raise NotImplementedError(
"If using arrays for sigma or tau both must arrays must be provided.")
diff --git a/Wrappers/Python/cil/optimisation/algorithms/SPDHG.py b/Wrappers/Python/cil/optimisation/algorithms/SPDHG.py
index 56a2673ffc..c8bf4d8a56 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/SPDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/SPDHG.py
@@ -30,8 +30,8 @@
class SPDHG(Algorithm):
- r'''Stochastic Primal Dual Hybrid Gradient (SPDHG) solves separable optimisation problems of the type:
-
+ r'''Stochastic Primal Dual Hybrid Gradient (SPDHG) solves separable optimisation problems of the type:
+
.. math:: \min_{x} f(Kx) + g(x) = \min_{x} \sum f_i(K_i x) + g(x)
where :math:`f_i` and the regulariser :math:`g` need to be proper, convex and lower semi-continuous.
@@ -45,27 +45,27 @@ class SPDHG(Algorithm):
operator : BlockOperator
BlockOperator must contain Linear Operators
tau : positive float, optional
- Step size parameter for the primal problem. If `None` will be computed by algorithm, see note for details.
+ Step size parameter for the primal problem. If `None` will be computed by algorithm, see note for details.
sigma : list of positive float, optional
- List of Step size parameters for dual problem. If `None` will be computed by algorithm, see note for details.
+ List of Step size parameters for dual problem. If `None` will be computed by algorithm, see note for details.
initial : DataContainer, optional
Initial point for the SPDHG algorithm. The default value is a zero DataContainer in the range of the `operator`.
gamma : float, optional
Parameter controlling the trade-off between the primal and dual step sizes
- sampler: `cil.optimisation.utilities.Sampler`, optional
- A `Sampler` controllingthe selection of the next index for the SPDHG update. If `None`, a sampler will be created for uniform random sampling with replacement. See notes.
+ sampler: `cil.optimisation.utilities.Sampler`, optional
+        A `Sampler` controlling the selection of the next index for the SPDHG update. If `None`, a sampler will be created for uniform random sampling with replacement. See notes.
- prob_weights: list of floats, optional,
+ prob_weights: list of floats, optional,
        Assuming the sampler is called a large number of times, this argument holds the expected number of times each index would be called, normalised to 1. Note that this should not be passed if the provided sampler already has it as an attribute: if the sampler has a `prob_weights` attribute it will take precedence over this parameter. Should be a list of floats of length `num_indices` that sum to 1. If no sampler with `prob_weights` is passed, it defaults to `[1/len(operator)]*len(operator)`.
- Note
- -----
- The `sampler` can be an instance of the `cil.optimisation.utilities.Sampler` class or a custom class with the `__next__(self)` method implemented, which outputs an integer index from {1, ..., len(operator)}.
+ Note
+ -----
+ The `sampler` can be an instance of the `cil.optimisation.utilities.Sampler` class or a custom class with the `__next__(self)` method implemented, which outputs an integer index from {1, ..., len(operator)}.
- Note
- -----
- "Random sampling with replacement" will select the next index with equal probability from `1 - len(operator)`.
+ Note
+ -----
+ "Random sampling with replacement" will select the next index with equal probability from `1 - len(operator)`.
Example
@@ -103,7 +103,7 @@ class SPDHG(Algorithm):
and `tau` is set as per case 2
- - Case 2: If `sigma` is provided but not `tau` then `tau` is calculated using the formula
+ - Case 2: If `sigma` is provided but not `tau` then `tau` is calculated using the formula
.. math:: \tau = 0.99\min_i( \frac{p_i}{ (\sigma_i \|K_i\|^2) })
@@ -124,10 +124,10 @@ class SPDHG(Algorithm):
References
----------
- [1]"Stochastic primal-dual hybrid gradient algorithm with arbitrary
+ [1]"Stochastic primal-dual hybrid gradient algorithm with arbitrary
sampling and imaging applications",
Chambolle, Antonin, Matthias J. Ehrhardt, Peter Richtárik, and Carola-Bibiane Schonlieb,
- SIAM Journal on Optimization 28, no. 4 (2018): 2783-2808. https://doi.org/10.1137/17M1134834
+ SIAM Journal on Optimization 28, no. 4 (2018): 2783-2808. https://doi.org/10.1137/17M1134834
[2]"Faster PET reconstruction with non-smooth priors by randomization and preconditioning",
Matthias J Ehrhardt, Pawel Markiewicz and Carola-Bibiane Schönlieb,
@@ -159,14 +159,14 @@ def set_up(self, f, g, operator, sigma=None, tau=None,
raise TypeError("operator should be a BlockOperator")
self._ndual_subsets = len(self.operator)
-
- self._prob_weights = getattr(sampler, 'prob_weights', prob_weights)
-
- self._deprecated_set_prob(deprecated_kwargs, sampler)
-
- if self._prob_weights is None:
+
+ self._prob_weights = getattr(sampler, 'prob_weights', prob_weights)
+
+ self._deprecated_set_prob(deprecated_kwargs, sampler)
+
+ if self._prob_weights is None:
self._prob_weights = [1/self._ndual_subsets]*self._ndual_subsets
-
+
if prob_weights is not None and self._prob_weights != prob_weights:
raise ValueError(' You passed a `prob_weights` argument and a sampler with a different attribute `prob_weights`, please remove the `prob_weights` argument.')
@@ -175,9 +175,9 @@ def set_up(self, f, g, operator, sigma=None, tau=None,
len(operator), prob=self._prob_weights)
else:
self._sampler = sampler
-
+
#Set the norms of the operators
- self._deprecated_set_norms(deprecated_kwargs)
+ self._deprecated_set_norms(deprecated_kwargs)
self._norms = operator.get_norms_as_list()
#Check for other kwargs
if deprecated_kwargs:
@@ -215,14 +215,14 @@ def _deprecated_set_prob(self, deprecated_kwargs, sampler):
----------
deprecated_kwargs : dict
Dictionary of keyword arguments.
- sampler : Sampler
+ sampler : Sampler
Sampler class for selecting the next index for the SPDHG update.
Notes
-----
This method is called by the set_up method.
"""
-
+
prob = deprecated_kwargs.pop('prob', None)
if prob is not None:
@@ -250,14 +250,14 @@ def _deprecated_set_norms(self, deprecated_kwargs):
This method is called by the set_up method.
"""
norms = deprecated_kwargs.pop('norms', None)
-
+
if norms is not None:
self.operator.set_norms(norms)
warnings.warn(
' `norms` is being deprecated, use instead the `BlockOperator` function `set_norms`', DeprecationWarning, stacklevel=2)
-
+
@property
def sigma(self):
return self._sigma
@@ -311,7 +311,7 @@ def set_step_sizes_from_ratio(self, gamma=1.0, rho=0.99):
def set_step_sizes(self, sigma=None, tau=None):
r""" Sets sigma and tau step-sizes for the SPDHG algorithm after the initial set-up. The step sizes can be either scalar or array-objects.
- When setting `sigma` and `tau`, there are 4 possible cases considered by setup function:
+ When setting `sigma` and `tau`, there are 4 possible cases considered by setup function:
- Case 1: If neither `sigma` or `tau` are provided then `sigma` is set using the formula:
@@ -319,7 +319,7 @@ def set_step_sizes(self, sigma=None, tau=None):
and `tau` is set as per case 2
- - Case 2: If `sigma` is provided but not `tau` then `tau` is calculated using the formula
+ - Case 2: If `sigma` is provided but not `tau` then `tau` is calculated using the formula
.. math:: \tau = 0.99\min_i( \frac{p_i}{ (\sigma_i \|K_i\|^2) })
@@ -377,7 +377,7 @@ def check_convergence(self):
Returns
-------
Boolean
- True if convergence criterion is satisfied. False if not satisfied or convergence is unknown.
+ True if convergence criterion is satisfied. False if not satisfied or convergence is unknown.
Note
-----
@@ -385,8 +385,8 @@ def check_convergence(self):
Note
----
- This checks the convergence criterion. Numerical errors may mean some sigma and tau values that satisfy the convergence criterion may not converge.
- Alternatively, step sizes outside the convergence criterion may still allow (fast) convergence.
+ This checks the convergence criterion. Numerical errors may mean some sigma and tau values that satisfy the convergence criterion may not converge.
+ Alternatively, step sizes outside the convergence criterion may still allow (fast) convergence.
"""
for i in range(self._ndual_subsets):
if isinstance(self._tau, Number) and isinstance(self._sigma[i], Number):
@@ -397,7 +397,7 @@ def check_convergence(self):
raise ValueError('Convergence criterion currently can only be checked for scalar values of tau and sigma[i].')
def update(self):
- """ Runs one iteration of SPDHG
+ """ Runs one iteration of SPDHG
"""
# Gradient descent for the primal variable
@@ -442,9 +442,7 @@ def update(self):
def update_objective(self):
# p1 = self.f(self.operator.direct(self.x)) + self.g(self.x)
- p1 = 0.
- for i, op in enumerate(self.operator.operators):
- p1 += self.f[i](op.direct(self.x))
+ p1 = sum(self.f[i](op.direct(self.x)) for i, op in enumerate(self.operator.operators))
p1 += self.g(self.x)
d1 = - self.f.convex_conjugate(self._y_old)
@@ -456,38 +454,38 @@ def update_objective(self):
@property
def objective(self):
- '''The saved primal objectives.
+ '''The saved primal objectives.
Returns
-------
list
- The saved primal objectives from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
+ The saved primal objectives from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
'''
return [x[0] for x in self.loss]
@property
def dual_objective(self):
- '''The saved dual objectives.
+ '''The saved dual objectives.
Returns
-------
list
- The saved dual objectives from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
+ The saved dual objectives from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
'''
return [x[1] for x in self.loss]
@property
def primal_dual_gap(self):
- '''The saved primal-dual gap.
+ '''The saved primal-dual gap.
Returns
-------
list
- The saved primal dual gap from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
+ The saved primal dual gap from `update_objective`. The number of saved values depends on the `update_objective_interval` kwarg.
'''
return [x[2] for x in self.loss]
def _save_previous_iteration(self, index, y_current):
- ''' Internal function used to save the previous iteration
+ ''' Internal function used to save the previous iteration
'''
self._y_old[index].fill(y_current)
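For the "Case 2" step-size rule quoted twice in the SPDHG docstrings, tau = 0.99·min_i(p_i/(sigma_i‖K_i‖²)), a one-function illustrative sketch (the helper is hypothetical, not CIL API):

```python
# Illustrative computation of SPDHG's tau from sigma (docstring Case 2).
def tau_from_sigma(sigma, norms, prob_weights, rho=0.99):
    return rho * min(p / (s * n**2)
                     for s, n, p in zip(sigma, norms, prob_weights))

# Two subsets with equal probability: 0.99 * min(1.0, 0.25) = 0.2475
print(tau_from_sigma(sigma=[0.5, 0.5], norms=[1.0, 2.0], prob_weights=[0.5, 0.5]))
```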
diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py
index 5687155f5c..e94b8cbdef 100644
--- a/Wrappers/Python/cil/optimisation/functions/Function.py
+++ b/Wrappers/Python/cil/optimisation/functions/Function.py
@@ -16,12 +16,8 @@
#
# Authors:
# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
-
-import warnings
-
from numbers import Number
import numpy as np
-from functools import reduce
from cil.utilities.errors import InPlaceError
@@ -319,10 +315,7 @@ def __call__(self, x):
.. math:: (F_{1} + F_{2} + ... + F_{n})(x) = F_{1}(x) + F_{2}(x) + ... + F_{n}(x)
"""
- ret = 0.
- for f in self.functions:
- ret += f(x)
- return ret
+ return sum(f(x) for f in self.functions)
def gradient(self, x, out=None):
r"""Returns the value of the sum of the gradient of functions evaluated at :math:`x`, if all of them are differentiable.
diff --git a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py
index 7e57e2c3fe..4d7a30e248 100644
--- a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py
@@ -179,11 +179,7 @@ def gradient(self, x, out=None):
def convex_conjugate(self, x):
r"""Returns the value of the convex conjugate of the WeightedL2NormSquared function at x."""
- tmp = 0
- if self.b is not None:
- tmp = x.dot(self.b)
-
- return (1./4) * (x/self.weight.sqrt()).squared_norm() + tmp
+ return 0.25 * (x/self.weight.sqrt()).squared_norm() + (x.dot(self.b) if self.b is not None else 0)
def proximal(self, x, tau, out=None):
r"""Returns the value of the proximal operator of the WeightedL2NormSquared function at x."""
diff --git a/Wrappers/Python/cil/optimisation/functions/LeastSquares.py b/Wrappers/Python/cil/optimisation/functions/LeastSquares.py
index cb7f0e0be0..c641f87944 100644
--- a/Wrappers/Python/cil/optimisation/functions/LeastSquares.py
+++ b/Wrappers/Python/cil/optimisation/functions/LeastSquares.py
@@ -18,7 +18,6 @@
from cil.optimisation.operators import LinearOperator, DiagonalOperator
from cil.optimisation.functions import Function
-from cil.framework import DataContainer
import warnings
from numbers import Number
import numpy as np
@@ -121,11 +120,11 @@ def calculate_Lipschitz(self):
# Compute the Lipschitz parameter from the operator if possible
# Leave it initialised to None otherwise
try:
- self._L = 2.0 * np.abs(self.c) * (self.A.norm()**2)
+ self._L = 2 * np.abs(self.c) * (self.A.norm()**2)
except AttributeError as ae:
if self.A.is_linear():
Anorm = LinearOperator.PowerMethod(self.A, 10)[0]
- self._L = 2.0 * np.abs(self.c) * (Anorm*Anorm)
+ self._L = 2 * np.abs(self.c) * (Anorm*Anorm)
else:
warnings.warn('{} could not calculate Lipschitz Constant. {}'.format(
self.__class__.__name__, ae))
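The `AttributeError` fallback above estimates ‖A‖ via `LinearOperator.PowerMethod` and then sets L = 2|c|·‖A‖². A plain-NumPy power-iteration sketch of the same estimate (illustrative, not the CIL implementation):

```python
import numpy as np

# Power iteration on A^T A to estimate ||A|| (the largest singular value).
def estimate_norm(A, iterations=10, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])
    for _ in range(iterations):
        x = A.T @ (A @ x)
        x /= np.linalg.norm(x)
    return np.sqrt(np.linalg.norm(A.T @ (A @ x)))

A = np.diag([3.0, 1.0])
L = 2 * abs(1.0) * estimate_norm(A) ** 2  # with c = 1: L ~ 2 * 3**2 = 18
print(L)
```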
diff --git a/Wrappers/Python/cil/optimisation/utilities/StepSizeMethods.py b/Wrappers/Python/cil/optimisation/utilities/StepSizeMethods.py
index 836cb01bf4..152e8248f2 100644
--- a/Wrappers/Python/cil/optimisation/utilities/StepSizeMethods.py
+++ b/Wrappers/Python/cil/optimisation/utilities/StepSizeMethods.py
@@ -25,11 +25,11 @@
class StepSizeRule(ABC):
"""
- Abstract base class for a step size rule. The abstract method, `get_step_size` takes in an algorithm and thus can access all parts of the algorithm (e.g. current iterate, current gradient, objective functions etc) and from this should return a float as a step size.
+ Abstract base class for a step size rule. The abstract method, `get_step_size` takes in an algorithm and thus can access all parts of the algorithm (e.g. current iterate, current gradient, objective functions etc) and from this should return a float as a step size.
"""
def __init__(self):
- '''Initialises the step size rule
+ '''Initialises the step size rule
'''
pass
@@ -38,27 +38,27 @@ def get_step_size(self, algorithm):
"""
Returns
--------
- the calculated step size:float
+        the calculated step size: float
"""
pass
class ConstantStepSize(StepSizeRule):
"""
- Step-size rule that always returns a constant step-size.
+ Step-size rule that always returns a constant step-size.
Parameters
----------
step_size: float
- The step-size to be returned with each call.
+ The step-size to be returned with each call.
"""
def __init__(self, step_size):
'''Initialises the constant step size rule
-
+
Parameters:
-------------
- step_size : float, the constant step size
+ step_size : float, the constant step size
'''
self.step_size = step_size
@@ -80,40 +80,40 @@ class ArmijoStepSizeRule(StepSizeRule):
Reference
---------
- Algorithm 3.1 in Nocedal, J. and Wright, S.J. eds., 1999. Numerical optimization. New York, NY: Springer New York. https://www.math.uci.edu/~qnie/Publications/NumericalOptimization.pdf)
-
+
- https://projecteuclid.org/download/pdf_1/euclid.pjm/1102995080
-
-
+
+
Parameters
----------
alpha: float, optional, default=1e6
- The starting point for the step size iterations
+ The starting point for the step size iterations
beta: float between 0 and 1, optional, default=0.5
The amount the step_size is reduced if the criterion is not met
max_iterations: integer, optional, default is numpy.ceil (2 * numpy.log10(alpha) / numpy.log10(2))
- The maximum number of iterations to find a suitable step size
+ The maximum number of iterations to find a suitable step size
warmstart: Boolean, default is True
- If `warmstart = True` the initial step size at each Armijo iteration is the calculated step size from the last iteration. If `warmstart = False` at each Armijo iteration, the initial step size is reset to the original, large `alpha`.
- In the case of *well-behaved* convex functions, `warmstart = True` is likely to be computationally less expensive. In the case of non-convex functions, or particularly tricky functions, setting `warmstart = False` may be beneficial.
+ If `warmstart = True` the initial step size at each Armijo iteration is the calculated step size from the last iteration. If `warmstart = False` at each Armijo iteration, the initial step size is reset to the original, large `alpha`.
+ In the case of *well-behaved* convex functions, `warmstart = True` is likely to be computationally less expensive. In the case of non-convex functions, or particularly tricky functions, setting `warmstart = False` may be beneficial.
"""
def __init__(self, alpha=1e6, beta=0.5, max_iterations=None, warmstart=True):
- '''Initialises the step size rule
+ '''Initialises the step size rule
'''
-
+
self.alpha_orig = alpha
if self.alpha_orig is None: # Can be removed when alpha and beta are deprecated in GD
- self.alpha_orig = 1e6
+ self.alpha_orig = 1e6
self.alpha = self.alpha_orig
- self.beta = beta
+ self.beta = beta
if self.beta is None: # Can be removed when alpha and beta are deprecated in GD
self.beta = 0.5
-
+
self.max_iterations = max_iterations
if self.max_iterations is None:
self.max_iterations = numpy.ceil(2 * numpy.log10(self.alpha_orig) / numpy.log10(2))
-
+
self.warmstart=warmstart
def get_step_size(self, algorithm):
@@ -126,15 +126,15 @@ def get_step_size(self, algorithm):
"""
k = 0
- if not self.warmstart:
+ if not self.warmstart:
self.alpha = self.alpha_orig
-
+
f_x = algorithm.calculate_objective_function_at_point(algorithm.solution)
self.x_armijo = algorithm.solution.copy()
-
+
log.debug("Starting Armijo backtracking with initial step size: %f", self.alpha)
-
+
while k < self.max_iterations:
algorithm.gradient_update.multiply(self.alpha, out=self.x_armijo)
@@ -142,17 +142,17 @@ def get_step_size(self, algorithm):
f_x_a = algorithm.calculate_objective_function_at_point(self.x_armijo)
sqnorm = algorithm.gradient_update.squared_norm()
- if f_x_a - f_x <= - (self.alpha/2.) * sqnorm:
+ if f_x_a - f_x <= - (self.alpha/2) * sqnorm:
break
k += 1.
self.alpha *= self.beta
-
+
log.info("Armijo rule took %d iterations to find step size", k)
if k == self.max_iterations:
raise ValueError(
'Could not find a proper step_size in {} loops. Consider increasing alpha or max_iterations.'.format(self.max_iterations))
-
+
return self.alpha
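A self-contained sketch of the backtracking loop above, on a toy quadratic; `f` and `grad` are illustrative stand-ins for the algorithm's objective and stored gradient update:

import numpy as np

def f(x):
    return 0.5 * np.dot(x, x)

def grad(x):
    return x

def armijo(x, alpha=1e6, beta=0.5, max_iterations=50):
    g = grad(x)
    f_x, sqnorm = f(x), np.dot(g, g)
    for _ in range(int(max_iterations)):
        # sufficient-decrease test used above: f(x - a*g) - f(x) <= -(a/2)*||g||^2
        if f(x - alpha * g) - f_x <= -(alpha / 2) * sqnorm:
            return alpha
        alpha *= beta              # shrink the step and try again
    raise ValueError('Could not find a proper step_size; increase alpha or max_iterations.')

print(armijo(np.array([1.0, -2.0])))  # ~0.95 after ~20 halvings of 1e6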
@@ -165,34 +165,34 @@ class BarzilaiBorweinStepSizeRule(StepSizeRule):
- :math:`\alpha_k^{LONG}=\frac{\Delta x\cdot\Delta x}{\Delta x\cdot\Delta g}`, or
- :math:`\alpha_k^{SHORT}=\frac{\Delta x \cdot\Delta g}{\Delta g \cdot\Delta g}`.
-
- Where the operator :math:`\cdot` is the standard inner product between two vectors.
-
+
+ Here the operator :math:`\cdot` is the standard inner product between two vectors.
+
This is suitable for use with gradient based iterative methods where the calculated gradient is stored as `algorithm.gradient_update`.
-
+
Parameters
----------
- initial: float, greater than zero
+ initial: float, greater than zero
The step-size for the first iteration. We recommend something of the order :math:`1/f.L` where :math:`f` is (the differentiable part of) the objective you wish to minimise.
- mode: One of 'long', 'short' or 'alternate', default is 'short'.
- This calculates the step-size based on the LONG, SHORT or alternating between the two, starting with short.
+ mode: One of 'long', 'short' or 'alternate', default is 'short'.
+ This calculates the step-size based on the LONG, SHORT or alternating between the two, starting with short.
stabilisation_param: 'auto', float or 'off', default is 'auto'
In order to add stability the step-size has an upper limit of :math:`\Delta/\|g_k\|`, where by default ('auto') the `stabilisation_param` :math:`\Delta` is determined automatically to be the minimum of :math:`\Delta x` over the first 3 iterations. The user can also pass a fixed constant, or turn the stabilisation "off" (equivalent to passing `np.inf`).
-
-
+
+
Reference
---------
- Barzilai, Jonathan; Borwein, Jonathan M. (1988). "Two-Point Step Size Gradient Methods". IMA Journal of Numerical Analysis. 8: 141–148, https://doi.org/10.1093/imanum/8.1.141
-
+
- Burdakov, O., Dai, Y. and Huang, N., 2019. STABILIZED BARZILAI-BORWEIN METHOD. Journal of Computational Mathematics, 37(6). https://doi.org/10.4208/jcm.1911-m2019-0171
- https://en.wikipedia.org/wiki/Barzilai-Borwein_method
"""
def __init__(self, initial, mode='short', stabilisation_param="auto"):
- '''Initialises the step size rule
+ '''Initialises the step size rule
'''
-
+
self.mode=mode
if self.mode == 'short':
self.is_short = True
@@ -200,23 +200,23 @@ def __init__(self, initial, mode='short', stabilisation_param="auto"):
self.is_short = False
else:
raise ValueError('Mode should be chosen from "long", "short" or "alternate". ')
-
- self.store_grad=None
+
+ self.store_grad=None
self.store_x=None
self.initial=initial
if stabilisation_param == 'auto':
self.adaptive = True
stabilisation_param = numpy.inf
elif stabilisation_param == "off":
- self.adaptive = False
+ self.adaptive = False
stabilisation_param = numpy.inf
elif ( isinstance(stabilisation_param, Number) and stabilisation_param >=0):
- self.adaptive = False
+ self.adaptive = False
else:
raise TypeError(" The stabilisation_param should be 'auto', a positive number or 'off'")
self.stabilisation_param=stabilisation_param
-
-
+
+
def get_step_size(self, algorithm):
"""
@@ -227,37 +227,37 @@ def get_step_size(self, algorithm):
the calculated step size: float
"""
- #For the first iteration we use an initial step size because the BB step size requires a previous iterate.
+ #For the first iteration we use an initial step size because the BB step size requires a previous iterate.
if self.store_x is None:
- self.store_x=algorithm.x.copy() # We store the last iterate in order to calculate the BB step size
- self.store_grad=algorithm.gradient_update.copy()# We store the last gradient in order to calculate the BB step size
+ self.store_x=algorithm.x.copy() # We store the last iterate in order to calculate the BB step size
+ self.store_grad=algorithm.gradient_update.copy()# We store the last gradient in order to calculate the BB step size
return self.initial
-
+
gradient_norm = algorithm.gradient_update.norm()
- #If the gradient is zero, gradient based algorithms will not update and te step size calculation will divide by zero so we stop iterations.
+ #If the gradient is zero, gradient based algorithms will not update and the step size calculation will divide by zero, so we stop iterations.
if gradient_norm < 1e-8:
raise StopIteration
- algorithm.x.subtract(self.store_x, out=self.store_x)
+ algorithm.x.subtract(self.store_x, out=self.store_x)
algorithm.gradient_update.subtract(self.store_grad, out=self.store_grad)
if self.is_short:
ret = (self.store_x.dot(self.store_grad))/ (self.store_grad.dot(self.store_grad))
else:
ret = (self.store_x.dot(self.store_x))/ (self.store_x.dot(self.store_grad))
-
+
#This computes the default stabilisation parameter, using the first three iterations
if (algorithm.iteration <=3 and self.adaptive):
self.stabilisation_param = min(self.stabilisation_param, self.store_x.norm() )
-
- # Computes the step size as the minimum of the ret, above, and :math:`\Delta/\|g_k\|` ignoring any NaN values.
+
+ # Computes the step size as the minimum of `ret`, above, and :math:`\Delta/\|g_k\|`, ignoring any NaN values.
ret = numpy.nanmin( numpy.array([ret, self.stabilisation_param/gradient_norm]))
-
- # We store the last iterate and gradient in order to calculate the BB step size
+
+ # We store the last iterate and gradient in order to calculate the BB step size
self.store_x.fill(algorithm.x)
self.store_grad.fill(algorithm.gradient_update)
-
+
if self.mode == "alternate":
- self.is_short = not self.is_short
-
- return ret
\ No newline at end of file
+ self.is_short = not self.is_short
+
+ return ret
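The two step-size formulas from the docstring, spelled out in NumPy for hypothetical consecutive iterates `x0, x1` and gradients `g0, g1`:

import numpy as np

x0, x1 = np.array([0.0, 0.0]), np.array([1.0, 2.0])
g0, g1 = np.array([-3.0, -2.0]), np.array([-1.0, -1.0])

dx, dg = x1 - x0, g1 - g0
alpha_long = dx @ dx / (dx @ dg)    # Delta x . Delta x / Delta x . Delta g
alpha_short = dx @ dg / (dg @ dg)   # Delta x . Delta g / Delta g . Delta g
print(alpha_long, alpha_short)      # 1.25 0.8; long >= short (by Cauchy-Schwarz, for dx.dg > 0)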
diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py
index 35667c616a..5a9f3a5619 100644
--- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py
+++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_2D.py
@@ -112,15 +112,10 @@ def rotation_matrix_z_from_euler(angle, degrees):
degrees : bool
True if `angle` is given in degrees, False if in radians
"""
- if degrees:
- alpha = angle / 180. * np.pi
- else:
- alpha = angle
-
+ alpha = angle * (np.pi / 180) if degrees else angle
rot_matrix = np.zeros((2,2), dtype=np.float64)
rot_matrix[0][0] = np.cos(alpha)
- rot_matrix[0][1] = -np.sin(alpha)
rot_matrix[1][0] = np.sin(alpha)
- rot_matrix[1][1] = np.cos(alpha)
-
+ rot_matrix[0][1] = -rot_matrix[1][0]
+ rot_matrix[1][1] = rot_matrix[0][0]
return rot_matrix
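For clarity, the rewritten helper in freestanding form: cos and sin are each evaluated once and reused for the remaining matrix entries.

import numpy as np

def rotation_matrix_z_from_euler(angle, degrees):
    alpha = angle * (np.pi / 180) if degrees else angle
    c, s = np.cos(alpha), np.sin(alpha)
    return np.array([[c, -s],
                     [s,  c]], dtype=np.float64)

print(rotation_matrix_z_from_euler(90, degrees=True))  # ~[[0, -1], [1, 0]]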
diff --git a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py
index 632dc7ccb1..d1c50841da 100644
--- a/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py
+++ b/Wrappers/Python/cil/plugins/astra/utilities/convert_geometry_to_astra_vec_3D.py
@@ -42,7 +42,7 @@ def convert_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in):
if sinogram_geometry_in.geom_type == AcquisitionType.CONE_FLEX:
return convert_flex_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in)
-
+
return convert_standard_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in)
@@ -65,7 +65,7 @@ def convert_standard_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry
"""
if sinogram_geometry_in.geom_type not in [AcquisitionType.PARALLEL, AcquisitionType.CONE]:
raise ValueError(f"Unsupported geometry type: {sinogram_geometry_in.geom_type}. Only {AcquisitionType.PARALLEL} and {AcquisitionType.CONE} geometries are supported.")
-
+
sinogram_geometry = sinogram_geometry_in.copy()
volume_geometry_temp = volume_geometry.copy()
@@ -75,7 +75,7 @@ def convert_standard_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry
system = sinogram_geometry.config.system
panel = sinogram_geometry.config.panel
-
+
if AcquisitionType.DIM2 & sinogram_geometry.dimension:
#create a 3D astra geom from 2D CIL geometry
@@ -124,7 +124,7 @@ def convert_standard_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry
elif sinogram_geometry.geom_type != 'cone_flex':
src = system.source.position.reshape(3,1)
projector = 'cone_vec'
-
+
#Build for astra 3D only
vectors = np.zeros((angles.num_positions, 12))
@@ -137,7 +137,7 @@ def convert_standard_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry
vectors[i, :3] = rotation_matrix.dot(src).reshape(3)
vectors[i, 3:6] = rotation_matrix.dot(det).reshape(3)
vectors[i, 6:9] = rotation_matrix.dot(row).reshape(3)
- vectors[i, 9:] = rotation_matrix.dot(col).reshape(3)
+ vectors[i, 9:] = rotation_matrix.dot(col).reshape(3)
proj_geom = astra.creators.create_proj_geom(projector, panel.num_pixels[1], panel.num_pixels[0], vectors)
vol_geom = astra.create_vol_geom(volume_geometry_temp.voxel_num_y,
@@ -176,11 +176,11 @@ def convert_flex_geometry_to_astra_vec_3D(volume_geometry, sinogram_geometry_in)
if sinogram_geometry_in.geom_type != AcquisitionType.CONE_FLEX:
raise ValueError(f"Unsupported geometry type: {sinogram_geometry_in.geom_type}. Only {AcquisitionType.CONE_FLEX} geometries are supported.")
-
+
system = sinogram_geometry_in.config.system
panel = sinogram_geometry_in.config.panel
-
+
sign_h = 1
sign_v = 1
@@ -226,17 +226,12 @@ def rotation_matrix_z_from_euler(angle, degrees):
degrees : bool
True if `angle` is given in degrees, False if in radians
"""
-
- if degrees:
- alpha = angle / 180. * np.pi
- else:
- alpha = angle
-
+ alpha = angle * (np.pi / 180) if degrees else angle
rot_matrix = np.zeros((3,3), dtype=np.float64)
rot_matrix[0][0] = np.cos(alpha)
- rot_matrix[0][1] = - np.sin(alpha)
rot_matrix[1][0] = np.sin(alpha)
- rot_matrix[1][1] = np.cos(alpha)
+ rot_matrix[0][1] = -rot_matrix[1][0]
+ rot_matrix[1][1] = rot_matrix[0][0]
rot_matrix[2][2] = 1
return rot_matrix
diff --git a/Wrappers/Python/cil/plugins/tigre/Geometry.py b/Wrappers/Python/cil/plugins/tigre/Geometry.py
index c34d2acb29..79476d36db 100644
--- a/Wrappers/Python/cil/plugins/tigre/Geometry.py
+++ b/Wrappers/Python/cil/plugins/tigre/Geometry.py
@@ -37,7 +37,8 @@ def getTIGREGeometry(ig, ag):
angles *= (np.pi/180.)
#convert CIL to TIGRE angles
- angles = -(angles + np.pi/2 +tg.theta )
+ angles += np.pi/2 + tg.theta
+ angles *= -1
#angles in range -pi->pi
for i, a in enumerate(angles):
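The motivation for the in-place rewrite, in a standalone sketch (`theta` stands in for `tg.theta`): augmented assignment updates the existing angle buffer instead of binding `angles` to a freshly allocated array, and keeps its dtype.

import numpy as np

angles = np.deg2rad(np.array([0.0, 90.0, 180.0], dtype=np.float32))
theta = 0.0
buf = angles                  # second reference to the same buffer

angles += np.pi / 2 + theta   # in-place: no temporary array
angles *= -1
print(angles is buf, angles.dtype)  # True float32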
diff --git a/Wrappers/Python/cil/processors/MaskGenerator.py b/Wrappers/Python/cil/processors/MaskGenerator.py
index d3d5dd380e..bfde3cde32 100644
--- a/Wrappers/Python/cil/processors/MaskGenerator.py
+++ b/Wrappers/Python/cil/processors/MaskGenerator.py
@@ -16,8 +16,8 @@
# Authors:
# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
-from cil.framework import DataProcessor, AcquisitionData, ImageData, DataContainer, ImageGeometry
-import warnings
+from cil.framework import DataProcessor
+from cil.utilities import dtype_like
import numpy
from scipy import special, ndimage
@@ -197,13 +197,13 @@ def check_input(self, data):
"Expected {}, got {}.".format(data.dimension_labels, self.axis))
return True
-
+
def check_output(self, out):
if out is not None:
if out.array.dtype != bool:
raise TypeError("Input type mismatch: got {0} expecting {1}"\
.format(out.array.dtype, bool))
-
+
return True
@@ -285,8 +285,7 @@ def process(self, out=None):
# if global mean
else:
-
- mask[numpy.abs(arr - numpy.mean(arr)) > self.threshold_factor * numpy.std(arr)] = 0
+ mask[numpy.abs(arr - numpy.mean(arr)) > dtype_like(self.threshold_factor, arr) * numpy.std(arr)] = 0
elif self.mode == 'median':
@@ -308,13 +307,13 @@ def process(self, out=None):
tmp = numpy.abs(arr - numpy.tile((numpy.median(arr, axis=axis_index))[slice_obj], tile_par))
median_absolute_dev = numpy.tile((numpy.median(tmp, axis=axis_index))[slice_obj], tile_par)
- mask[tmp > self.threshold_factor * c * median_absolute_dev] = 0
+ mask[tmp > dtype_like(self.threshold_factor * c, arr) * median_absolute_dev] = 0
# if global median
else:
tmp = numpy.abs(arr - numpy.median(arr))
- mask[tmp > self.threshold_factor * c * numpy.median(tmp)] = 0
+ mask[tmp > dtype_like(self.threshold_factor * c, arr) * numpy.median(tmp)] = 0
elif self.mode == 'movmean':
@@ -327,14 +326,14 @@ def process(self, out=None):
mean_array = ndimage.generic_filter(arr, numpy.mean, size=kernel, mode='reflect')
std_array = ndimage.generic_filter(arr, numpy.std, size=kernel, mode='reflect')
- mask[numpy.abs(arr - mean_array) > self.threshold_factor * std_array] = 0
+ mask[numpy.abs(arr - mean_array) > dtype_like(self.threshold_factor, arr) * std_array] = 0
# if global movmean
else:
mean_array = ndimage.generic_filter(arr, numpy.mean, size=(self.window,)*ndim, mode='reflect')
std_array = ndimage.generic_filter(arr, numpy.std, size=(self.window,)*ndim, mode='reflect')
- mask[numpy.abs(arr - mean_array) > self.threshold_factor * std_array] = 0
+ mask[numpy.abs(arr - mean_array) > dtype_like(self.threshold_factor, arr) * std_array] = 0
elif self.mode == 'movmedian':
@@ -356,7 +355,7 @@ def process(self, out=None):
median_array = ndimage.median_filter(arr, footprint=kernel_shape, mode='reflect')
tmp = abs(arr - median_array)
- mask[tmp > self.threshold_factor * c * ndimage.median_filter(tmp, footprint=kernel_shape, mode='reflect')] = 0
+ mask[tmp > dtype_like(self.threshold_factor * c, arr) * ndimage.median_filter(tmp, footprint=kernel_shape, mode='reflect')] = 0
# if global movmedian
else:
@@ -365,7 +364,7 @@ def process(self, out=None):
median_array = ndimage.median_filter(arr, size=kernel_shape, mode='reflect')
tmp = abs(arr - median_array)
- mask[tmp > self.threshold_factor * c * ndimage.median_filter(tmp, size=kernel_shape, mode='reflect')] = 0
+ mask[tmp > dtype_like(self.threshold_factor * c, arr) * ndimage.median_filter(tmp, size=kernel_shape, mode='reflect')] = 0
else:
raise ValueError('Mode not recognised. One of the following is expected: ' + \
@@ -381,7 +380,7 @@ def process(self, out=None):
out = type(data)(mask, deep_copy=False, dtype=mask.dtype, geometry=geometry, dimension_labels=data.dimension_labels)
else:
out.fill(mask)
-
+
return out
def _parse_threshold_value(self, arr, quantile=False):
diff --git a/Wrappers/Python/cil/processors/Padder.py b/Wrappers/Python/cil/processors/Padder.py
index 4e0cecf71b..ece36a4ba7 100644
--- a/Wrappers/Python/cil/processors/Padder.py
+++ b/Wrappers/Python/cil/processors/Padder.py
@@ -415,7 +415,7 @@ def check_input(self, data):
if isinstance(data, (ImageData, AcquisitionData)):
self._data_array = True
self._geometry = data.geometry
-
+
elif isinstance(data, DataContainer):
self._data_array = True
self._geometry = None
@@ -467,7 +467,7 @@ def _get_dimensions_from_dict(self, dict):
def _set_up(self):
-
+
data = self.get_input()
offset = 4-data.ndim
@@ -543,7 +543,7 @@ def _set_up(self):
self._processed_dims[i] = 1
self._shape_out_full[i] += self._pad_width_param[i][0] + self._pad_width_param[i][1]
- self._shape_out = tuple([i for i in self._shape_out_full if i > 1])
+ self._shape_out = tuple(i for i in self._shape_out_full if i > 1)
def _process_acquisition_geometry(self):
diff --git a/Wrappers/Python/cil/recon/FBP.py b/Wrappers/Python/cil/recon/FBP.py
index 45a4ee116b..42f64bc64f 100644
--- a/Wrappers/Python/cil/recon/FBP.py
+++ b/Wrappers/Python/cil/recon/FBP.py
@@ -386,13 +386,13 @@ def __init__ (self, input, image_geometry=None, filter='ram-lak'):
def _calculate_weights(self, acquisition_geometry):
ag = acquisition_geometry
- xv = np.arange(-(ag.pixel_num_h -1)/2,(ag.pixel_num_h -1)/2 + 1,dtype=np.float32) * ag.pixel_size_h
- yv = np.arange(-(ag.pixel_num_v -1)/2,(ag.pixel_num_v -1)/2 + 1,dtype=np.float32) * ag.pixel_size_v
+ xv = np.arange(-(ag.pixel_num_h -1)/2,(ag.pixel_num_h -1)/2 + 1,dtype=np.float32) * np.float32(ag.pixel_size_h)
+ yv = np.arange(-(ag.pixel_num_v -1)/2,(ag.pixel_num_v -1)/2 + 1,dtype=np.float32) * np.float32(ag.pixel_size_v)
(yy, xx) = np.meshgrid(xv, yv)
principal_ray_length = ag.dist_source_center + ag.dist_center_detector
scaling = 0.25 * ag.magnification * (2 * np.pi/ ag.num_projections) / ag.pixel_size_h
- self._weights = scaling * principal_ray_length / np.sqrt((principal_ray_length ** 2 + xx ** 2 + yy ** 2))
+ self._weights = np.float32(scaling * principal_ray_length / np.sqrt((principal_ray_length ** 2 + xx ** 2 + yy ** 2)))
def run(self, out=None, verbose=1):
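A sketch of the dtype promotion these explicit `np.float32` casts guard against, assuming the geometry stores pixel sizes as float64 scalars: under NumPy 2's NEP 50 rules a float64 scalar promotes a float32 array to float64, whereas NumPy 1.x's value-based casting kept it float32.

import numpy as np

xv = np.arange(-3, 4, dtype=np.float32)
pixel_size = np.float64(0.1)                # hypothetical geometry attribute

print((xv * pixel_size).dtype)              # float64 on NumPy 2, float32 on NumPy 1.x
print((xv * np.float32(pixel_size)).dtype)  # float32 on both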
diff --git a/Wrappers/Python/cil/utilities/__init__.py b/Wrappers/Python/cil/utilities/__init__.py
index f8f2fc2df8..9ccd2ed0da 100644
--- a/Wrappers/Python/cil/utilities/__init__.py
+++ b/Wrappers/Python/cil/utilities/__init__.py
@@ -15,3 +15,10 @@
#
# Authors:
# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
+def dtype_like(input_value, reference_array):
+ """`input_value.astype(reference_array.dtype, copy=False)` with fallback to `input_value`"""
+ if hasattr(reference_array, 'dtype'):
+ if hasattr(input_value, 'astype'):
+ return input_value.astype(reference_array.dtype, copy=False)
+ return reference_array.dtype.type(input_value)
+ return input_value
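A usage sketch for the new helper, mirroring the MaskGenerator call sites above: casting a threshold to the data's dtype keeps the arithmetic (and any intermediate arrays) in single precision.

import numpy as np
from cil.utilities import dtype_like   # the helper added above

arr = np.ones(4, dtype=np.float32)
threshold_factor = np.float64(3.0)

print((threshold_factor * arr.std()).dtype)                   # float64
print((dtype_like(threshold_factor, arr) * arr.std()).dtype)  # float32
print(dtype_like(3.0, arr))                                   # plain float converted via dtype.type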
diff --git a/Wrappers/Python/cil/utilities/display.py b/Wrappers/Python/cil/utilities/display.py
index 255ed26d1d..14d3bc6f3b 100644
--- a/Wrappers/Python/cil/utilities/display.py
+++ b/Wrappers/Python/cil/utilities/display.py
@@ -785,7 +785,7 @@ def draw(self, elev=35, azim=35, view_distance=10, grid=False, figsize=(10,10),
world_limits = self.ax.get_w_lims()
self.ax.set_box_aspect((world_limits[1]-world_limits[0],world_limits[3]-world_limits[2],world_limits[5]-world_limits[4]))
- l = self.ax.plot(np.NaN, np.NaN, '-', color='none', label='')[0]
+ l = self.ax.plot(np.nan, np.nan, '-', color='none', label='')[0]
for i in range(3):
self.handles.insert(2,l)
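Context for the one-character change above: NumPy 2.0 removed the capitalised alias, keeping only the lowercase spelling, which works on both major versions.

import numpy as np

print(np.nan)   # canonical spelling; fine on NumPy 1.x and 2.x
# np.NaN        # AttributeError on NumPy >= 2.0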
diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py
index 01d2f376bd..58d81baac1 100644
--- a/Wrappers/Python/test/test_BlockDataContainer.py
+++ b/Wrappers/Python/test/test_BlockDataContainer.py
@@ -471,7 +471,7 @@ def test_sapyb_a_b_scalar(self):
cp0 = BlockDataContainer(data0,data2)
cp1 = BlockDataContainer(data1,data3)
-
+
out = cp0 * 0. - 10
@@ -485,10 +485,10 @@ def test_sapyb_a_b_scalar(self):
res = BlockDataContainer(res0, res2)
self.assertBlockDataContainerEqual(out, res)
-
+
#With out is None
out = cp0.sapyb(3,cp1,-2, num_threads=4)
-
+
self.assertBlockDataContainerEqual(out, res)
def test_sapyb_a_blockdc(self):
@@ -522,10 +522,10 @@ def test_sapyb_a_blockdc(self):
res = BlockDataContainer(res0, res2)
self.assertBlockDataContainerEqual(out, res)
-
+
#With out is None
out = cp0.sapyb(a,cp1,-2, num_threads=4)
-
+
self.assertBlockDataContainerEqual(out, res)
@@ -559,10 +559,10 @@ def test_sapyb_b_blockdc(self):
res = BlockDataContainer(res0, res2)
self.assertBlockDataContainerEqual(out, res)
-
+
#With out is None
out = cp0.sapyb(3,cp1, b, num_threads=4)
-
+
self.assertBlockDataContainerEqual(out, res)
def test_sapyb_ab_blockdc(self):
@@ -599,10 +599,10 @@ def test_sapyb_ab_blockdc(self):
res = BlockDataContainer(res0, res2)
self.assertBlockDataContainerEqual(out, res)
-
+
#With out is None
out = cp0.sapyb(a,cp1,b, num_threads=4)
-
+
self.assertBlockDataContainerEqual(out, res)
@@ -638,10 +638,10 @@ def test_sapyb_ab_blockdc_y_dc(self):
res = BlockDataContainer(res0, res2)
self.assertBlockDataContainerEqual(out, res)
-
+
#With out is None
out = cp0.sapyb(a,data1,b, num_threads=4)
-
+
self.assertBlockDataContainerEqual(out, res)
def test_BlockDataContainer_mixed_arithmetic(self):
@@ -954,7 +954,7 @@ def test_allocate(self):
bg = BlockGeometry(ig0, ig1)
self.assertBlockDataContainerAlmostEqual(bg.allocate(0), cp1)
self.assertBlockDataContainerAlmostEqual(cp1.geometry.allocate(0), cp1)
-
+
def test_pnorm2(self):
ig0 = ImageGeometry(2,3,4)
ig1 = ImageGeometry(2,3,5)
@@ -968,7 +968,7 @@ def test_pnorm2(self):
cp0.pnorm(2)
cp0 = BlockDataContainer(data2,data2)
- np.testing.assert_allclose(cp0.pnorm(2).as_array(), np.sqrt(1**2 + 1**2)*data2.as_array())
+ np.testing.assert_allclose(cp0.pnorm(2).as_array(), (2**0.5)*data2.as_array())
def test_pnorm1(self):
ig0 = ImageGeometry(2,3,4)
@@ -988,7 +988,7 @@ def test_pnorm1(self):
def test_equals(self):
ig0 = ImageGeometry(2,3,4)
-
+
data0 = ig0.allocate(0)
data2 = ig0.allocate(1)
@@ -1007,12 +1007,12 @@ def test_equals(self):
def test_geometry_None(self):
ig0 = ImageGeometry(2,3,4)
-
+
data0 = ig0.allocate(0)
-
+
from cil.framework import DataContainer
X, Y, Z = 2, 12, 5
-
+
a = numpy.ones((X, Y, Z), dtype='float32')
ds = DataContainer(a, False, ['X', 'Y', 'Z'])
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 61e65c03b9..4b2296b457 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -183,8 +183,8 @@ def test_L2NormSquared(self):
f_scaled_data = scalar * L2NormSquared(b=b)
# call
- numpy.testing.assert_equal(f_scaled_no_data(u), scalar * f(u))
- numpy.testing.assert_equal(f_scaled_data(u), scalar * f1(u))
+ numpy.testing.assert_allclose(f_scaled_no_data(u), scalar * f(u))
+ numpy.testing.assert_allclose(f_scaled_data(u), scalar * f1(u))
# grad
numpy.testing.assert_array_almost_equal(
@@ -640,8 +640,8 @@ def tests_for_L2NormSq_and_weighted(self):
f_scaled_data = scalar * L2NormSquared(b=b)
# call
- numpy.testing.assert_equal(f_scaled_no_data(u), scalar * f(u))
- numpy.testing.assert_equal(f_scaled_data(u), scalar * f1(u))
+ numpy.testing.assert_allclose(f_scaled_no_data(u), scalar * f(u))
+ numpy.testing.assert_allclose(f_scaled_data(u), scalar * f1(u))
# grad
numpy.testing.assert_array_almost_equal(
@@ -786,13 +786,13 @@ def tests_for_LS_weightedLS(self):
# check call with weight
res1 = c * (A.direct(x) - b).dot(weight * (A.direct(x) - b))
res2 = f1(x)
- numpy.testing.assert_almost_equal(res1, res2)
+ numpy.testing.assert_allclose(res1, res2)
# check call without weight
#res1 = c * (A.direct(x)-b).dot((A.direct(x) - b))
res1 = c * (A.direct(x) - b).squared_norm()
res2 = f2(x)
- numpy.testing.assert_almost_equal(res1, res2)
+ numpy.testing.assert_allclose(res1, res2)
# check gradient with weight
out = ig.allocate(None)
@@ -2130,16 +2130,16 @@ class MockFunction(Function):
"""Mock function to test FunctionOfAbs"""
def __init__(self, L=1.0):
super().__init__(L=L)
-
+
def __call__(self, x):
return x.array**2 # Simple squared function
-
+
def proximal(self, x, tau, out=None):
if out is None:
out = x.geometry.allocate(None)
out.array = x.array / (1 + tau) # Simple proximal operator
return out
-
+
def convex_conjugate(self, x):
return 0.5 * x.array**2 # Quadratic conjugate function
@@ -2151,8 +2151,8 @@ def setUp(self):
self.data_real32 = VectorData(np.array([1, 2, 3], dtype=np.float32))
self.mock_function = MockFunction()
self.abs_function = FunctionOfAbs(self.mock_function)
-
-
+
+
def test_initialization(self):
self.assertIsInstance(self.abs_function._function, MockFunction)
self.assertFalse(self.abs_function._lower_semi)
@@ -2163,66 +2163,64 @@ def test_call(self):
result = self.abs_function(self.data_complex64)
expected = np.abs(self.data_complex64.array)**2
np.testing.assert_array_almost_equal(result, expected, decimal=4)
-
+
result = self.abs_function(self.data_complex128)
expected = np.abs(self.data_complex128.array)**2
np.testing.assert_array_almost_equal(result, expected, decimal=6)
-
+
result = self.abs_function(self.data_real32)
expected = np.abs(self.data_real32.array)**2
np.testing.assert_array_almost_equal(result, expected, decimal=6)
-
+
def test_get_real_complex_dtype(self):
from cil.optimisation.functions.AbsFunction import _get_real_complex_dtype
real, compl = _get_real_complex_dtype(self.data_complex128)
self.assertEqual( real, np.float64)
self.assertEqual(compl, np.complex128)
-
+
real, compl = _get_real_complex_dtype(self.data_complex64)
self.assertEqual( real, np.float32)
self.assertEqual(compl, np.complex64)
-
+
real, compl = _get_real_complex_dtype(self.data_real32)
self.assertEqual( real, np.float32)
self.assertEqual(compl, np.complex64)
-
-
-
-
+
+
+
+
def test_proximal(self):
tau = 0.5
result = self.abs_function.proximal(self.data_complex128, tau)
expected = (np.abs(self.data_complex128.array) / (1 + tau)) * np.exp(1j * np.angle(self.data_complex128.array))
np.testing.assert_array_almost_equal(result.array, expected, decimal=6)
-
+
result = self.abs_function.proximal(self.data_complex64, tau)
expected = (np.abs(self.data_complex64.array) / (1 + tau)) * np.exp(1j * np.angle(self.data_complex64.array))
np.testing.assert_array_almost_equal(result.array, expected, decimal=4)
-
+
result = self.abs_function.proximal(self.data_real32, tau)
expected = (np.abs(self.data_real32.array) / (1 + tau)) * np.exp(1j * np.angle(self.data_real32.array))
np.testing.assert_array_almost_equal(result.array, expected, decimal=4)
-
-
-
+
+
+
def test_convex_conjugate_lower_semi(self):
self.abs_function._lower_semi = True
result = self.abs_function.convex_conjugate(self.data_complex128)
expected = 0.5 * np.abs(self.data_complex128.array) ** 2
np.testing.assert_array_almost_equal(result, expected, decimal=6)
-
+
result = self.abs_function.convex_conjugate(self.data_complex64)
expected = 0.5 * np.abs(self.data_complex64.array) ** 2
np.testing.assert_array_almost_equal(result, expected, decimal=4)
-
+
result = self.abs_function.convex_conjugate(self.data_real32)
expected = 0.5 * np.abs(self.data_real32.array) ** 2
np.testing.assert_array_almost_equal(result, expected, decimal=4)
-
-
+
+
def test_convex_conjugate_not_implemented(self):
self.abs_function._lower_semi = False
-
- self.assertEqual(self.abs_function.convex_conjugate(self.data_real32), 0.)
-
+ self.assertEqual(self.abs_function.convex_conjugate(self.data_real32), 0.)
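Why the tests migrate from `assert_equal` to `assert_allclose`: two mathematically equal floating-point expressions need not be bit-identical, and `assert_equal` demands exact equality. A minimal illustration:

import numpy as np

a = 0.1 + 0.2
b = 0.3
print(a == b)                     # False: a is 0.30000000000000004
np.testing.assert_allclose(a, b)  # passes: relative difference is ~1 ulp
# np.testing.assert_equal(a, b)   # would raise AssertionError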
diff --git a/pyproject.toml b/pyproject.toml
index 499858c6d8..9c785bc000 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,7 +62,7 @@ dependencies = [
"h5py",
#"ipp==2021.12.*", # PyPI conflicts with conda package
"numba",
- "numpy>=1.23, <2.0.0",
+ "numpy>=1.23,<3",
"olefile>=0.46",
"pillow",
"pywavelets",
@@ -75,12 +75,12 @@ plugins = [
#"tomophantom==2.0.0", # [linux] # missing from PyPI
]
gpu = [
- "astra-toolbox>=1.9.9.dev5,<=2.1", # [not osx]
+ "astra-toolbox>=1.9.9.dev5,<=2.4", # [not osx]
#"tigre>=2.4,<=2.6", # missing from PyPI
]
[dependency-groups]
test = [
- #"ccpi-regulariser=24.0.1", # [not osx] # missing from PyPI
+ #"ccpi-regulariser=25.0.0", # [not osx] # missing from PyPI
"cvxpy",
"matplotlib-base>=3.3",
"packaging",
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index ebecb34f75..b7b7cd4e55 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -18,8 +18,8 @@ test:
- tomophantom 2.0.0 # [linux]
- tigre 2.6
- packaging
- - ccpi-regulariser 24.0.1 # [not osx]
- - astra-toolbox 2.1 {{ 'cuda*' if environ.get('TESTS_FORCE_GPU', '') else 'py*' }}
+ - ccpi-regulariser 25.0.0 {{ 'cuda*' if environ.get('TESTS_FORCE_GPU', '') else 'cpu*' }} # [not osx]
+ - astra-toolbox >=2.1 {{ 'cuda*' if environ.get('TESTS_FORCE_GPU', '') else 'py*' }}
- matplotlib-base >=3.3
- zenodo_get >=1.6
- dxchange
@@ -54,7 +54,7 @@ requirements:
run:
- {{ pin_compatible('python', min_pin='x.x') }}
- ipp {{ ipp_version }}
- - numpy >=1.23,<2
+ - numpy >=1.23,<3
- scipy >=1.4
- h5py
- pillow
diff --git a/scripts/create_local_env_for_cil_development.sh b/scripts/create_local_env_for_cil_development.sh
index 36c473b381..0b18557844 100755
--- a/scripts/create_local_env_for_cil_development.sh
+++ b/scripts/create_local_env_for_cil_development.sh
@@ -81,8 +81,8 @@ if test $test_deps = 0; then
conda_args+=(-c conda-forge -c https://software.repos.intel.com/python/conda --override-channels)
else
conda_args+=(
- astra-toolbox=2.1=cuda*
- ccpi-regulariser=24.0.1
+ astra-toolbox::astra-toolbox=2.*=cuda*
+ ccpi-regulariser=25.0.0=cuda*
cil-data
cvxpy
ipywidgets
diff --git a/scripts/requirements-test-windows.yml b/scripts/requirements-test-windows.yml
index 616db62ee9..a071c60a10 100644
--- a/scripts/requirements-test-windows.yml
+++ b/scripts/requirements-test-windows.yml
@@ -18,12 +18,12 @@ channels:
- https://software.repos.intel.com/python/conda
dependencies:
- python >=3.10
- - numpy >=1.23,<2
+ - numpy >=1.23,<3
- ccpi::cil-data >=22
- ccpi::tigre 2.6
- - ccpi::ccpi-regulariser 24.0.1
+ - ccpi::ccpi-regulariser 25.0.0 cuda*
- ccpi::tomophantom 2.0.0
- - astra-toolbox 2.1 cuda*
+ - astra-toolbox::astra-toolbox 2.* cuda*
- cvxpy
- python-wget
- scikit-image
diff --git a/scripts/requirements-test.yml b/scripts/requirements-test.yml
index 496c19b3ad..e3c7a6d76c 100644
--- a/scripts/requirements-test.yml
+++ b/scripts/requirements-test.yml
@@ -18,13 +18,13 @@ channels:
- https://software.repos.intel.com/python/conda
dependencies:
- python >=3.10
- - numpy >=1.23,<2
+ - numpy >=1.23,<3
- libgomp # [linux]
- ccpi::cil-data >=22
- ccpi::tigre 2.6
- - ccpi::ccpi-regulariser 24.0.1
+ - ccpi::ccpi-regulariser 25.0.0 cuda*
- ccpi::tomophantom 2.0.0
- - astra-toolbox 2.1 cuda*
+ - astra-toolbox::astra-toolbox 2.* cuda*
- cvxpy
- python-wget
- scikit-image