diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 56e353443..039b6a351 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,5 +1,4 @@ name: Run tests - on: push: branches: [main, dev] @@ -14,6 +13,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest, macos-14, macos-15-intel, macos-latest] + env-type: [pip] include: - os: macos-14 llvm: llvm@15 @@ -30,34 +30,33 @@ jobs: with: python-version: '3.11' - - name: set default compiler on macOS to GCC + - name: Set default compiler on macOS to GCC if: startsWith(matrix.os, 'macos') run: | echo "CC=gcc-15" >> $GITHUB_ENV echo "CXX=g++-15" >> $GITHUB_ENV - - name: Install dependencies for ubuntu-latest + - name: Install system dependencies (Ubuntu) if: matrix.os == 'ubuntu-latest' run: | sudo apt update sudo apt install -y libegl1 libgl1 libxext6 libx11-6 - - name: Upgrade pip and install package + - name: Install with pip + if: matrix.env-type == 'pip' run: | python -m pip install --upgrade pip pip install -e . -v + pip install pytest pytest-mock - name: Smoke test - run: | - python -c "import pyvale" + run: python -c "import pyvale" - name: Run sensorsim and DIC tests - run: | - pip install pytest pytest-mock - pytest -v tests/dic/. tests/sensorsim/. + shell: bash -el {0} + run: pytest -v tests/dic/. tests/sensorsim/. tests/strain/. - name: Run blender tests (skip macOS ARM) if: ${{ !startsWith(matrix.os, 'macos') || contains(matrix.os, 'intel') }} - run: | - pip install pytest pytest-mock - pytest -v tests/blender/. + shell: bash -el {0} + run: pytest -v tests/blender/. 
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 680573834..03c3846c6 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -3,9 +3,9 @@ name: Build Wheels and upload (if new release) to PyPI on: workflow_dispatch: push: - branches: [main, dev] + branches: [main] pull_request: - branches: [main, dev] + branches: [main] release: types: - published diff --git a/docs/apidoc.sh b/docs/apidoc.sh index a25c61520..aa37b2bd5 100644 --- a/docs/apidoc.sh +++ b/docs/apidoc.sh @@ -1,4 +1,5 @@ -set -e +#!/usr/bin/env bash +set -e set -o pipefail echo "==========================================" @@ -11,23 +12,16 @@ error_exit() { exit 1 } -# Generate the python API documentation +# Python API documentation sphinx-apidoc -f -o ./source --separate ../src/pyvale/ ../src/pyvale/data/* ../src/pyvale/examples/* || error_exit "sphinx-apidoc failed" # Remove autogenerated TOC file rm -f source/modules.rst || error_exit "Failed to remove modules.rst" -# Generate the C++ API documentation -( cd ./source && doxygen Doxyfile ) || error_exit "doxygen generation failed" - echo "Updating generated RST files..." -# Change title for dataset file -# sed -i '0,/pyvale.dataset/s/pyvale.dataset/pyvale.dataset/' source/pyvale.dataset.rst || error_exit "Failed to update dataset title" -# sed -i '0,/dataset.dataset/s/dataset.dataset/pyvale.dataset/' source/pyvale.dataset.dataset.rst || error_exit "Failed to update dataset title" - # Modules to process -modules=("dic" "blender" "mooseherder" "sensorsim" "verif" "dataset") +modules=("dic" "blender" "mooseherder" "sensorsim" "verif" "dataset" "strain") for mod in "${modules[@]}"; do rst="source/pyvale.${mod}.rst" @@ -59,3 +53,79 @@ rm -f ./source/pyvale.dic.dic2dcpp.rst || error_exit "Failed to remove dic2dcpp. echo "PYTHON API DOCUMENTATION GENERATED SUCCESSFULLY!" 
+# ------------------------------------------------------------------ +# Generate C++ API documentation +# ------------------------------------------------------------------ +echo "==========================================" +echo " GENERATING C++ API DOCUMENTATION..." +echo "==========================================" + +# Run Doxygen (optional, if you still want it) +( cd ./source && doxygen Doxyfile ) || error_exit "doxygen generation failed" + +# Create RST files for each C++ header +for mod in "${modules[@]}"; do + src_dir="../src/pyvale/${mod}/cpp" + out_dir="source/cpp/${mod}" + + mkdir -p "$out_dir" + + for file in "$src_dir"/*.hpp; do + [ -e "$file" ] || continue + + filename=$(basename "$file") + base="${filename%.hpp}" + rst_file="${out_dir}/${base}.rst" + + echo "Generating $rst_file" + + cat > "$rst_file" < "$index_file" +done + + + +echo "C++ RST FILES GENERATED SUCCESSFULLY!" + diff --git a/docs/requirements.txt b/docs/requirements.txt index 4e3b51ff2..e3d4fa44d 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,37 +1,35 @@ -# --- Core Sphinx Builder --- +# Core Stuff sphinx -babel # For language support/i18n -docutils # For RST parsing (fundamental) -imagesize # For image dimension handling -pygments # For code highlighting -snowballstemmer # For search indexing (used by epub3 builder) +babel +docutils +imagesize +pygments +snowballstemmer +breathe -# --- Theme and Parser --- +# Theme furo alabaster myst-parser -# --- Sphinx Extensions (Directly Listed in conf.py) --- +# nice formatting stuff sphinx-codeautolink sphinx-copybutton -breathe # For C++ documentation integration -sphinx-gallery # For example gallery generation +sphinx-gallery +sphinx-design +sphinx-autodoc-typehints +ipywidgets +matplotlib +pillow -# --- Extension Dependencies --- -sphinx-autodoc-typehints # You use 'autodoc' which benefits from this -ipywidgets # Often required by sphinx-gallery examples (e.g., matplotlib output) -matplotlib # Required for plotting in 
sphinx-gallery examples -pillow # Required for various image/gallery operations - -# --- Specific sphinxcontrib Extensions (Explicitly added to resolve previous errors) --- -sphinxcontrib-applehelp # Needed if the epub3 builder triggers it (which is common) +sphinxcontrib-applehelp sphinxcontrib-devhelp sphinxcontrib-htmlhelp sphinxcontrib-jsmath -sphinxcontrib-qthelp # Added proactively for completeness +sphinxcontrib-qthelp sphinxcontrib-serializinghtml -# --- Your Project Dependencies (From your original list) --- +# other random stuff certifi idna jinja2 diff --git a/docs/source/Doxyfile b/docs/source/Doxyfile index afc58f73a..6f647f5fc 100644 --- a/docs/source/Doxyfile +++ b/docs/source/Doxyfile @@ -943,7 +943,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = ../../src/pyvale/dic/cpp +INPUT = ../../src/pyvale/dic/cpp ../../src/pyvale/strain/cpp # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css index 1616a045e..10bf7d4a3 100644 --- a/docs/source/_static/custom.css +++ b/docs/source/_static/custom.css @@ -9,3 +9,7 @@ color: #0066cc !important; /* Monokai-style cyan */ } +.code-scroll { + max-height: 400px; + overflow: auto; +} diff --git a/docs/source/_static/hrdic.png b/docs/source/_static/hrdic.png new file mode 120000 index 000000000..0abafd6da --- /dev/null +++ b/docs/source/_static/hrdic.png @@ -0,0 +1 @@ +../../../images/hrdic.png \ No newline at end of file diff --git a/docs/source/api_cpp.rst b/docs/source/api_cpp.rst index 10af8da29..28be3091e 100644 --- a/docs/source/api_cpp.rst +++ b/docs/source/api_cpp.rst @@ -2,21 +2,9 @@ Detailed C++ API ================= -Digital Image Correlation -------------------------- - - .. 
toctree:: :maxdepth: 1 - :caption: C++ Header Files + :caption: Modules - cpp/dicbruteforce - cpp/dicfourier - cpp/dicinterpolator - cpp/dicmain - cpp/dicoptimizer - cpp/dicrg - cpp/dicscanmethod - cpp/dicsmooth - cpp/dicstrain - cpp/dicutil + ./cpp/cpp_dic.rst + ./cpp/cpp_strain.rst diff --git a/docs/source/api_py.rst b/docs/source/api_py.rst index 1600d8948..b24792e01 100644 --- a/docs/source/api_py.rst +++ b/docs/source/api_py.rst @@ -10,6 +10,7 @@ Detailed Python API pyvale.sensorsim pyvale.blender pyvale.dic + pyvale.strain pyvale.mooseherder pyvale.verif pyvale.dataset diff --git a/docs/source/cite.rst b/docs/source/cite.rst index 316d304aa..66aa5c53f 100644 --- a/docs/source/cite.rst +++ b/docs/source/cite.rst @@ -4,4 +4,28 @@ Citing Pyvale If Pyvale has contributed to your research or work please acknowledge the project in your academic puplications using the following citation: +.. tab-set:: + .. tab-item:: APA + + Hirst, J., Sibson, L., Tayeb, A., Poole, B., Sampson, M., Bielajewa, W., ... & Fletcher, L. (2026). + PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation (DIC) Engine + Capable of Handling Gigapixel Images. + *arXiv preprint arXiv:2601.12941*. + + .. tab-item:: MLA + + Hirst, Joel, et al. "PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation + (DIC) Engine Capable of Handling Gigapixel Images." + *arXiv preprint arXiv:2601.12941* (2026). + + .. tab-item:: Bibtex + + .. 
code-block:: + + @article{pyvale2026, + title={PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation (DIC) Engine Capable of Handling Gigapixel Images}, + author={Hirst, Joel and Sibson, Lorna and Tayeb, Adel and Poole, Ben and Sampson, Megan and Bielajewa, Wiera and Atkinson, Michael and Marsh, Alex and Spencer, Rory and Hamill, Rob and others}, + journal={arXiv preprint arXiv:2601.12941}, + year={2026} + } diff --git a/docs/source/conf.py b/docs/source/conf.py index 8b2c296cd..edfac6c14 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -18,8 +18,8 @@ project = 'Pyvale' copyright = '2025, The CAV Team' author = 'The CAV Team at United Kingdom Atomic Energy Authority (UKAEA)' -release = '2026.1.3' -version = '2026.1.3' +release = '2026.2.0' +version = '2026.2.0' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration @@ -28,10 +28,13 @@ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary', - 'sphinx.ext.viewcode', # Adds source code links + 'sphinx.ext.intersphinx', + 'sphinx_design', + 'sphinx.ext.viewcode', 'sphinx_codeautolink', 'sphinx_copybutton', 'sphinx_gallery.gen_gallery', + 'sphinx.ext.mathjax', 'breathe', 'myst_parser' ] diff --git a/docs/source/cpp/dicbruteforce.rst b/docs/source/cpp/dicbruteforce.rst deleted file mode 100644 index b63e258a9..000000000 --- a/docs/source/cpp/dicbruteforce.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicbruteforce.hpp -================== - -.. doxygenfile:: dicbruteforce.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicfourier.rst b/docs/source/cpp/dicfourier.rst deleted file mode 100644 index 7f22abf03..000000000 --- a/docs/source/cpp/dicfourier.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicfourier.hpp -================= - -.. 
doxygenfile:: dicfourier.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp diff --git a/docs/source/cpp/dicinterpolator.rst b/docs/source/cpp/dicinterpolator.rst deleted file mode 100644 index d49347c22..000000000 --- a/docs/source/cpp/dicinterpolator.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicinterpolator.hpp -===================== - -.. doxygenfile:: dicinterpolator.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp diff --git a/docs/source/cpp/dicmain.rst b/docs/source/cpp/dicmain.rst deleted file mode 100644 index 74aed9645..000000000 --- a/docs/source/cpp/dicmain.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicmain.hpp -================== - -.. doxygenfile:: dicmain.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicoptimizer.rst b/docs/source/cpp/dicoptimizer.rst deleted file mode 100644 index 1c22fb558..000000000 --- a/docs/source/cpp/dicoptimizer.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicoptimizer.hpp -================== - -.. doxygenfile:: dicoptimizer.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicrg.rst b/docs/source/cpp/dicrg.rst deleted file mode 100644 index be6212ed7..000000000 --- a/docs/source/cpp/dicrg.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicrg.hpp -================== - -.. doxygenfile:: dicrg.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicscanmethod.rst b/docs/source/cpp/dicscanmethod.rst deleted file mode 100644 index 251b21d0c..000000000 --- a/docs/source/cpp/dicscanmethod.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicscanmethod.hpp -================== - -.. 
doxygenfile:: dicscanmethod.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicsmooth.rst b/docs/source/cpp/dicsmooth.rst deleted file mode 100644 index 5a0b2f50c..000000000 --- a/docs/source/cpp/dicsmooth.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicsmooth.hpp -================== - -.. doxygenfile:: dicsmooth.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicstrain.rst b/docs/source/cpp/dicstrain.rst deleted file mode 100644 index f6d94636d..000000000 --- a/docs/source/cpp/dicstrain.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicstrain.hpp -================== - -.. doxygenfile:: dicstrain.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/cpp/dicutil.rst b/docs/source/cpp/dicutil.rst deleted file mode 100644 index f453983d5..000000000 --- a/docs/source/cpp/dicutil.rst +++ /dev/null @@ -1,6 +0,0 @@ -dicutil.hpp -================== - -.. doxygenfile:: dicutil.hpp - :project: pyvale - :path: ../src/pyvale/dic/cpp \ No newline at end of file diff --git a/docs/source/examples/examples.rst b/docs/source/examples/examples.rst index f3f38c027..9a4d3be7d 100644 --- a/docs/source/examples/examples.rst +++ b/docs/source/examples/examples.rst @@ -10,7 +10,7 @@ Sensor Simulation Basics examples_basics_sensorsim -Digital Image Correlation +DIC & Strain Calculations ------------------------- .. toctree:: :maxdepth: 2 diff --git a/docs/source/examples/examples_dic.rst b/docs/source/examples/examples_dic.rst index 3e2afe562..150543b07 100644 --- a/docs/source/examples/examples_dic.rst +++ b/docs/source/examples/examples_dic.rst @@ -1,6 +1,6 @@ .. _examples_dic: -Digital Image Correlation (DIC) +DIC & Strain Calculations ================================ .. 
toctree:: @@ -11,4 +11,5 @@ Digital Image Correlation (DIC) dic/ex3_plate_with_hole_strain.rst dic/ex4_dic_blender.rst dic/ex5_dic_challenge.rst + dic/ex6_hrdic.rst diff --git a/docs/source/guide/guide_blender.rst b/docs/source/guide/guide_blender.rst deleted file mode 100644 index f1e04419c..000000000 --- a/docs/source/guide/guide_blender.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. _guide_blender: - -Blender Guide -============= diff --git a/docs/source/guide/guide_dic.rst b/docs/source/guide/guide_dic.rst deleted file mode 100644 index 43918f36f..000000000 --- a/docs/source/guide/guide_dic.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. _guide_dic: - -Digital Image Correlation Guide -=============================== diff --git a/docs/source/dev_guide/dev_guide.rst b/docs/source/guide_dev/dev_guide.rst similarity index 100% rename from docs/source/dev_guide/dev_guide.rst rename to docs/source/guide_dev/dev_guide.rst diff --git a/docs/source/dev_guide/dev_guide_customsensors.rst b/docs/source/guide_dev/dev_guide_customsensors.rst similarity index 100% rename from docs/source/dev_guide/dev_guide_customsensors.rst rename to docs/source/guide_dev/dev_guide_customsensors.rst diff --git a/docs/source/dev_guide/dev_guide_designspec.rst b/docs/source/guide_dev/dev_guide_designspec.rst similarity index 100% rename from docs/source/dev_guide/dev_guide_designspec.rst rename to docs/source/guide_dev/dev_guide_designspec.rst diff --git a/docs/source/dev_guide/dev_guide_python.rst b/docs/source/guide_dev/dev_guide_python.rst similarity index 100% rename from docs/source/dev_guide/dev_guide_python.rst rename to docs/source/guide_dev/dev_guide_python.rst diff --git a/docs/source/guide_theory/guide_theory.rst b/docs/source/guide_theory/guide_theory.rst new file mode 100644 index 000000000..f22e2bc1b --- /dev/null +++ b/docs/source/guide_theory/guide_theory.rst @@ -0,0 +1,14 @@ +.. _guide_theory: + +Theory Guide +================================= + +Module user guides +------------------ + +.. 
toctree:: + :maxdepth: 1 + + guide_theory_dic.rst + + diff --git a/docs/source/guide_theory/guide_theory_dic.rst b/docs/source/guide_theory/guide_theory_dic.rst new file mode 100644 index 000000000..51fd44c64 --- /dev/null +++ b/docs/source/guide_theory/guide_theory_dic.rst @@ -0,0 +1,318 @@ +.. _guide_theory_dic: + +DIC Theory Overview +====================================== + +Digital Image Correlation (DIC) is a technique for measuring +deformation, displacement, and strain fields by analyzing a sequence of images captured +during a loading process. At its core, DIC tracks the movement of patterns and textures between images, +and from this motion, deduces how the material has deformed. + +Local Subset DIC +---------------- +In Local subset DIC, images are divided into small :math:`N \times N` pixel regions which are called *subsets*. +By comparing the intensity patterns of these subsets across images, we can estimate +their displacement. The image below shows the difference (or cost), where 0 is a perfect match, +for a brute force scan along the x-axis. + + +.. figure:: guide_theory_dic_cost.gif + :alt: Example Cost Minimization + + +Typically we don't use a brute-force approach, but instead use an **optimization algorithm** that is much more computationally +efficient. The optimization attempts to minimize the difference between the +reference and deformed subset by using the gradient. You can see from the +brute-force approach that there are local minima where the optimizer might get +stuck, so it's important to ensure that any initial guess is reasonably +close to the actual parameters that define the mapping from the subset in the +reference image to the subset in the deformed image. + +Shape Functions +--------------- +It's often the case that a subset undergoes more complex deformation than just rigid translation between reference and deformed image. +To model how a subset deforms more generally, we introduce *shape functions*. 
These are +mathematical mappings that describe how points inside a subset move relative to +each other. Pyvale supports three commonly used shape functions in DIC. The simplest of course is a pure *rigid* translation (two +parameters: horizontal and vertical shift). After this comes *affine* and +*quadratic* shape functions: + +.. math:: + \xi(x_i,y_i, \mathbf{p}) = + \underbrace{\begin{bmatrix} p_0 \\ p_1 \end{bmatrix}}_{\text{rigid}} + + \underbrace{\begin{bmatrix} 1+p_2 & p_3 \\ p_4 & 1+p_5 \end{bmatrix} + \begin{bmatrix} x_i \\ y_i \end{bmatrix}}_{\text{affine}} + \underbrace{\begin{bmatrix} p_6 & p_7 & p_8 \\ p_9 & p_{10} & p_{11} \end{bmatrix} + \begin{bmatrix} x_i^2 \\ x_iy_i \\ y_i^2 \end{bmatrix}}_{\text{quadratic}} + +Each higher-order shape function includes all terms from the lower-order functions: affine includes rigid terms, and quadratic includes both affine and rigid terms. + +.. image:: guide_theory_dic_shape_functions_light.png + :class: only-light + :alt: Diagram (light) + +.. image:: guide_theory_dic_shape_functions_dark.png + :class: only-dark + :alt: Diagram (dark) + +Cost Functions / Correlation Criterion +--------------------------------------- +How do we decide if two subsets *match*? To do this we use what's known as a cost function, or often referred to as a Correlation Criterion. +This is a numerical measure of similarity between the reference subset and the deformed +subset. **Pyvale supports three choices:** + +SSD +^^^^ + +.. math:: + C_{\text{SSD}} = \sum_i \big(f(x_{i},y_{i}) - g(x_{i},y_{i})\big)^{2} + +NSSD +^^^^^ + +.. math:: + C_{\text{NSSD}} = \sum_i \left( \frac{f(x_{i},y_{i})}{\sqrt{\sum_{j} f(x_{j},y_{j})^{2}}} - \frac{g(x_{i},y_{i})}{\sqrt{\sum_{j} g(x_{j},y_{j})^{2}}} \right)^{2} + +ZNSSD +^^^^^^ + +.. 
math:: + \text{ZNSSD} = \sum_i \left( \frac{\bar{f}(x_{i},y_{i})}{\sqrt{\sum_{j} \bar{f}(x_{j},y_{j})^{2}}} - \frac{\bar{g}(x_{i},y_{i})}{\sqrt{\sum_{j} \bar{g}(x_{j},y_{j})^{2}}} \right)^{2} + +where :math:`f(x_{i},y_{i})` and :math:`g(x_{i},y_{i})` represent the gray-level intensity values at location :math:`(x_{i},y_{i})` in the reference and deformed images. +:math:`\bar{f}(x_{i},y_{i}) = f(x_{i},y_{i}) - f_m`, and :math:`f_m` is the mean gray-level value of the subset. + +Cost Function Optimization +---------------------------- +Minimizing the value of the cost function requires using an optimization routine. +These algorithms start with an initial guess and refine it step by step. Convergence depends on a good +initial guess and a generally well-behaved cost surface. +Pyvale uses a **Levenberg-Marquardt** non-linear optimization routine to minimize the cost function. +The algorithm proceeds iteratively by updating the shape-function parameter +vector :math:`\mathbf{p}`. At iteration :math:`i`, the parameters are updated according to + +.. math:: + + \mathbf{p}_{i+1} + = \mathbf{p}_{i} + - \left( \mathbf{H} + \lambda\,\mathrm{diag}\!\left[\mathbf{H}\right] \right)^{-1} + \nabla C(\mathbf{p}_{i}), + +where :math:`\nabla C(\mathbf{p}_{i})` denotes the gradient of the cost function +evaluated at the current parameters. The matrix :math:`\mathbf{H}` is an +approximation to the Hessian, taken here as +:math:`\mathbf{H} \approx \mathbf{J}\mathbf{J}^{\mathsf{T}}`, with +:math:`\mathbf{J}` the Jacobian of the residuals with respect to +:math:`\mathbf{p}`. The scalar :math:`\lambda > 0` is the +Levenberg–Marquardt damping factor, which controls the balance between +gradient-descent and Gauss–Newton behavior. + +After computing the updated parameter vector :math:`\mathbf{p}_{i+1}`, the cost +function is re-evaluated to assess the quality of the update. Based on the +resulting change in error, the damping parameter :math:`\lambda` is adjusted. 
+If the cost **increases**, don't update the shape function parameters and +**increase** the damping by a factor of 10. +If the cost **decreases**, the update is accepted and the damping is **reduced** by a factor of 10. +This update–evaluation–adjustment cycle is repeated until convergence criteria +based on both parameter change and cost-function reduction are satisfied. The +magnitude of the parameter update is quantified by + +.. math:: + + \text{d}p_{\text{norm}} + = \sqrt{\langle \Delta\mathbf{p}, \Delta\mathbf{p} \rangle}, + \qquad + p_{\text{norm}} + = \sqrt{\langle \mathbf{p}, \mathbf{p} \rangle}, + +where :math:`\Delta\mathbf{p} = \mathbf{p}_{i+1} - \mathbf{p}_{i}`, and +:math:`\langle \cdot,\cdot \rangle` denotes the Euclidean inner product. From +these quantities, the relative parameter-update tolerance is defined as + +.. math:: + + x_{\text{tol}} + = \frac{\text{d}p_{\text{norm}}}{p_{\text{norm}} + \varepsilon}, + +with :math:`\varepsilon` a small regularizing constant included to avoid division +by zero. Convergence in terms of the cost function is measured using + +.. math:: + + f_{\text{tol}} + = \frac{\lvert C_{i+1} - C_{i} \rvert}{\lvert C_{i} \rvert + \varepsilon}. + +Iteration terminates once both :math:`x_{\text{tol}}` and +:math:`f_{\text{tol}}` fall below user-specified thresholds, indicating that +further updates would produce only negligible changes in the parameters and the +cost function. + + +Sub-pixel Accuracy With Interpolation +-------------------------------------- + +DIC aims for precision beyond the integer values defined by the image's pixel grid. Integer-pixel +matching is a good start, but physical displacements do not perfectly map to pixel +boundaries. To capture this, we refine the measurement to *sub-pixel* +accuracy using interpolation. Instead of treating the image as a discrete +array, we approximate it as a smooth surface. This is done using **cubic B-spline +interpolation**. 
+ +Cubic B-spline Interpolation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Cubic B-spline interpolation represents the interpolated function as a weighted sum of shifted B-spline basis functions: + +.. math:: + f(x,y) = \sum_{i,j} c_{i,j} \, \beta^3(x - i) \, \beta^3(y - j) + +where :math:`c_{i,j}` are B-spline coefficients and :math:`\beta^3(t)` is the cubic B-spline basis function: + +.. math:: + + \beta^3(t) = \frac{1}{6} \begin{cases} + (2 - |t|)^3 & \text{if } 1 \leq |t| < 2 \\ + 4 - 6t^2 + 3|t|^3 & \text{if } |t| < 1 \\ + 0 & \text{if } |t| \geq 2 + \end{cases} + +Prefiltering +^^^^^^^^^^^^^^^^^^ +To ensure that the B-spline interpolation passes through the original intensity values, we +can apply a filter to get the B-spline coefficients. +For a 1D row of intensity data :math:`\{f_0, f_1, \ldots, f_{N-1}\}`, the filtering has three steps. +First is to apply a normalization: + +.. math:: + c_i^{(0)} = (1-z)(1-1/z) f_i + +with pole :math:`z = \sqrt{3} - 2`. Then apply a **causal filter**: + +.. math:: + c_i^{(+)} = c_i^{(0)} + z \, c_{i-1}^{(+)}, \quad i = 1, 2, \ldots, N-1 + +with an initial condition of :math:`c_0^{(+)} = c_0^{(0)}`. The final step is +to apply an **anticausal filter**. Starting at the end of the row: + +.. math:: + c_{N-1} = \frac{z}{z^2-1} c_{N-1}^{(+)} + +and then applying: + +.. math:: + c_i = z(c_{i+1} - c_i^{(+)}), \quad i = N-2, N-3, \ldots, 0 + +This 1D filtering can be done for each row of the image to get a list of coefficients :math:`c_{i,j}^{(x)}`. It can then be applied to the columns of the result to +get a full list of coefficients :math:`c_{i,j}`. + +Evaluation +^^^^^^^^^^ +To get the intensity value at an arbitrary subpixel location :math:`f(x,y)` we +compute the local coordinates: + +.. math:: + + t_x = x - \mathrm{floor}(x), \quad t_y = y - \mathrm{floor}(y) + +then evaluate the basis functions for the local coordinates: + +.. 
math:: + B_0(t) &= (1-t)^3/6 \\ + B_1(t) &= (3t^3 - 6t^2 + 4)/6 \\ + B_2(t) &= (-3t^3 + 3t^2 + 3t + 1)/6 \\ + B_3(t) &= t^3/6 + +and compute the interpolated value: + +.. math:: + + f(x,y) = \sum_{k=0}^{3} \sum_{\ell=0}^{3} c_{i_x+k-1, i_y+\ell-1} \, B_k(t_x) \, B_\ell(t_y) + + +.. list-table:: + :width: 85% + :class: borderless + + * - .. figure:: guide_theory_dic_preinterp.png + :width: 100% + + - .. figure:: guide_theory_dic_interp.png + :width: 100% + +Reliability-Guided DIC (RG-DIC) +-------------------------------- +It's highly likely that some subsets will poorly correlate due to texture changes, cracks, noise, changes in lighting, or large local deformations. +Reliability-Guided DIC (RG-DIC) was a method developed by `B. Pan (2009) `_ that helps to limit +the amount of poor results by correlating subsets in an order determined by the magnitude of the correlation coefficient. +The algorithm proceeds as follows: + +#. The user selects an initial seed location. Correlation is performed at the + seed location and its 4 neighboring points. These points are marked as computed + in a global mask. +#. The four points are added to a queue ordered from highest correlation + coefficient to lowest. +#. The point at the top of the queue is removed. Correlation is then performed + for its uncomputed neighbors. Successful correlations from previously computed neighboring subsets + are used as initial conditions for the optimization routine. +#. Newly computed points are updated in the global mask and added to the queue. +#. The algorithm then expands outward in a front-propagation style until all + subsets have been computed. + +.. figure:: guide_theory_dic_rgdic.gif + :width: 80% + :align: center + +Strain Calculations & Formulations +----------------------------------- +The in-plane strain components can be calculated using the spatial derivatives of the displacement field to capture local changes in geometry. 
+The 2D deformation gradient matrix, :math:`\mathbf{F}`, is given by: + +.. math:: + + \mathbf{F} = + \begin{bmatrix} + 1+\dfrac{\partial u_x}{\partial x} & \dfrac{\partial u_x}{\partial y} \\ + \dfrac{\partial u_y}{\partial x} & 1+\dfrac{\partial u_y}{\partial y} + \end{bmatrix} + = \mathbf{I} + \nabla \boldsymbol{u}, + \qquad + \boldsymbol{u} = + \begin{bmatrix} + u_x \\ u_y + \end{bmatrix}. + +To calculate the partial derivatives above, we compute the gradient over a square window containing :math:`N \times N` displacement data points from the DIC calculation. +Because we are using displacement values from DIC, the strain window is dependent on the subset-step, :math:`s`, and subset-size, :math:`w`. +These quantities, along with the size of the strain window, form what is typically referred to as the Virtual Strain Gauge, :math:`VSG`: + +.. math:: + + VSG = (\mathrm{N} - 1)s + w + +Due to the noise in DIC measurements, smoothing is typically applied over the strain window. +We support bilinear and biquadratic smoothing over the strain window elements. +The polynomial approximation is given by: + +.. math:: + + \boldsymbol{u}(x,y) = \mathbf{P}(x,y)\,\mathbf{c}, + +where :math:`\boldsymbol{u} = [\,u_x\;u_y\,]^{T}`, :math:`\mathbf{P}(x,y)` is a row vector of basis terms, and :math:`\mathbf{c}` is a column vector of coefficients. The polynomial basis is: + +.. math:: + + \mathbf{P}(x,y) = + \begin{cases} + [1, x, y] & \text{bilinear}, \\ + [1, x, y, x^2, y^2, x^2 y, x y^2, x^2 y^2] & \text{biquadratic} + \end{cases} + +The coefficients are obtained by solving a linear least-squares problem, since the displacement field is linearly parameterized in the unknown coefficients. +Once the polynomial coefficients have been obtained, the deformation gradient tensor and strain can be calculated. 
We currently support the following strain tensor formulations: + +- ``strain_formulation="HENCKY"``, *Hencky (logarithmic)*: :math:`\boldsymbol{\varepsilon}=\ln(\sqrt{\mathbf{F}^\mathsf{T}\mathbf{F}})` +- ``strain_formulation="GREEN"``, *Green–Lagrange*: :math:`\boldsymbol{\varepsilon}=\tfrac{1}{2}\left(\mathbf{F}^\mathsf{T}\mathbf{F}-\mathbf{I}\right)` +- ``strain_formulation="ALMANSI"``, *Euler–Almansi*: :math:`\boldsymbol{\varepsilon}=\tfrac{1}{2}\left(\mathbf{I}-(\mathbf{F}\mathbf{F}^\mathsf{T})^{-1}\right)` +- ``strain_formulation="BIOT_LAGRANGE"``, *Biot (right / Lagrangian)*: :math:`\boldsymbol{\varepsilon}=\sqrt{\mathbf{F}^\mathsf{T}\mathbf{F}}-\mathbf{I}` +- ``strain_formulation="BIOT_EULER"``, *Biot (left / Eulerian)*: :math:`\boldsymbol{\varepsilon}=\sqrt{\mathbf{F}\mathbf{F}^\mathsf{T}}-\mathbf{I}` + +where :math:`\mathbf{I}` is the :math:`2 \times 2` identity matrix. diff --git a/docs/source/guide_theory/guide_theory_dic_cost.gif b/docs/source/guide_theory/guide_theory_dic_cost.gif new file mode 100644 index 000000000..78e59689b Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_cost.gif differ diff --git a/docs/source/guide_theory/guide_theory_dic_interp.png b/docs/source/guide_theory/guide_theory_dic_interp.png new file mode 100644 index 000000000..086dfb4db Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_interp.png differ diff --git a/docs/source/guide_theory/guide_theory_dic_preinterp.png b/docs/source/guide_theory/guide_theory_dic_preinterp.png new file mode 100644 index 000000000..2e9d6bdfd Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_preinterp.png differ diff --git a/docs/source/guide_theory/guide_theory_dic_rgdic.gif b/docs/source/guide_theory/guide_theory_dic_rgdic.gif new file mode 100644 index 000000000..01742ff94 Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_rgdic.gif differ diff --git 
a/docs/source/guide_theory/guide_theory_dic_shape_functions_dark.png b/docs/source/guide_theory/guide_theory_dic_shape_functions_dark.png new file mode 100644 index 000000000..503bacb39 Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_shape_functions_dark.png differ diff --git a/docs/source/guide_theory/guide_theory_dic_shape_functions_light.png b/docs/source/guide_theory/guide_theory_dic_shape_functions_light.png new file mode 100644 index 000000000..3b686fa43 Binary files /dev/null and b/docs/source/guide_theory/guide_theory_dic_shape_functions_light.png differ diff --git a/docs/source/guide_user/guide_blender.rst b/docs/source/guide_user/guide_blender.rst new file mode 100644 index 000000000..2656a6d9f --- /dev/null +++ b/docs/source/guide_user/guide_blender.rst @@ -0,0 +1,4 @@ +.. _guide_blender: + +Blender User Guide +================== diff --git a/docs/source/guide_user/guide_dic.rst b/docs/source/guide_user/guide_dic.rst new file mode 100644 index 000000000..a29904fb9 --- /dev/null +++ b/docs/source/guide_user/guide_dic.rst @@ -0,0 +1,481 @@ +.. _guide_dic: + +DIC User Guide +============== + +**This page should be considered a guide to the core DIC user interactions and useful +tips and tricks for getting good results in a reasonable amount of time. +Specific examples can be found in the** :ref:`examples_all` **section**. +**If you are after a runthrough of the mathematics of the DIC engine, you can find +that in the** :ref:`guide_theory_dic`. + + + + +Importing the DIC modules +-------------------------- + +After installing Pyvale, you'll need to import the relevant modules before +going any further. We like to import the modules in the following way: + +.. code-block:: Python + + import pyvale.dic as dic + import pyvale.strain as strain + + +The Pyvale DIC workflow +----------------------- + +..
figure:: guide_dic_flowchart.png + :alt: DIC flowchart + :width: 60% + + +Region Of interest (ROI) Selection +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Basic Selection +"""""""""""""""""" + +For any DIC calculation the user must first specify the **region of interest +(ROI)** for their calculation. You can either use Pyvale (see the first DIC +example for a detailed walkthrough), or, you can create it using Numpy. +Basic approaches are summarized below: + +.. tab-set:: + + .. tab-item:: Interactive ROI Pyvale + + + .. code-block:: python + + roi = dic.RegionOfInterest(ref_image="./ref_img.tiff") # initialization + roi.interactive_selection(subset_size=31) # ROI GUI will launch + + dic.calculate_2d( + ..., + roi=roi.mask, + seed=roi.seed, + ..., + ) + + .. tab-item:: Programmatic ROI Pyvale + + + .. code-block:: Python + + roi = dic.RegionOfInterest(ref_image="./ref_img.tiff") # initialization + roi.rect_boundary(left=100,right=100,bottom=100,top=100) # exclude a 100 pixel boundary + + dic.calculate_2d( + ..., + roi=roi.mask, + seed=[500, 500], # seed at centre of image + ..., + ) + + .. tab-item:: Programmatic ROI Numpy + + + .. code-block:: Python + + arr = np.zeros((1000, 1000), dtype=bool) # image is 1000x1000 + arr[100:900, 100:900] = True # Exclude a 100 pixel boundary + + dic.calculate_2d( + ..., + roi=arr, + seed=[500, 500], # seed at centre of image + ..., + ) + +Behind the scenes ``dic.RegionOfInterest`` initializes ``roi.mask`` as a ``np.ndarray`` with +the same dimensions as the reference image. You can then manipulate it as you +would with any 2D Numpy array before passing to the DIC engine. + +Defining a ROI with a YAML file +"""""""""""""""""""""""""""""""""" + +Another option is to use a YAML file for the ROI. When using +``roi.interactive_selection`` you can save and open ROI configurations easily +within the GUI. In most cases, it will be easier to create and save the ROI YAML using +the GUI and use that for all future calculations. 
It will also give you a sense +of how the YAML is structured. + +Each entry in the YAML file describes a specific ROI object with a specific +type. The ROI shape can be specified with: + +* ``RectROI`` +* ``CircleROI`` +* ``PolyLineROI`` + +For each ROI shape, you must also specify the Boolean flag ``add`` which +indicates whether the ROI shape is being added (``true``) or subtracted (``false``) +from the mask. + +There are shape-specific fields: + +* ``pos``: For rectangular, circular, and seed ROIs: the top-left or center position as [x, y]. +* ``size``: For RectROI and SeedROI: [width, height]. For CircleROI: diameter values (same number repeated). +* ``points``: For ``PolyLineROI`` an ordered list of [x, y] vertices defining the polyline. + + +The order in which they are defined in the YAML file determines the layering. +The ROI object at the top of the file will be the bottommost layer. Any +later additions or subtractions will be applied to previously defined ROI +objects. + +You can also specify the seed location using ``SeedROI``. This requires the same +arguments as ``RectROI``. Ensure that this has been configured with ``add: true`` and the width and height +fields for ``size`` are identical. + +An example of an ROI configured in YAML side-by-side with it loaded into the ROI +GUI can be found below: + + +.. list-table:: + :widths: 50 50 + :align: left + + * - .. container:: code-scroll + + .. code-block:: yaml + + - type: RectROI + pos: + - 34 + - 37 + size: + - 433 + - 588 + add: true + - type: RectROI + pos: + - 322 + - 53 + size: + - 123 + - 225 + add: false + - type: CircleROI + pos: + - 371 + - 536 + size: + - 133 + - 133 + add: false + - type: PolyLineROI + points: + - - 158 + - 422 + - - 85 + - 482 + - - 88 + - 584 + - - 222 + - 586 + - - 227 + - 480 + add: false + - type: SeedROI + pos: + - 108 + - 145 + size: + - 41 + - 41 + add: true + + - .. 
image:: ./guide_dic_interactive.png + :height: 400px + + +Performing a correlation +^^^^^^^^^^^^^^^^^^^^^^^^ + +The next step is to perform a correlation with the ``dic.calculate_2d`` function. +There are a few arguments that **must** be passed. These are the +reference and deformed images, the roi mask and seed, as well as the subset size +and subset step: + +.. code-block:: Python + + dic.calculate_2d( + reference=ref_image, # Can be str | pathlib.Path | np.ndarray + deformed=def_image, # Can be str | pathlib.Path | np.ndarray + roi_mask=roi.mask, # np.ndarray + seed=roi.seed, # pair of integers. Allowed as list[int] | list[np.int32] | np.ndarray + subset_size=31, # must be an odd number + subset_step=15 + ) + +All other possible arguments will have default values. See +the API documentation for this function for more details. + +Understanding Output files +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The next step is to understand the output. By default the results will be saved +in the user's current working directory in human readable .CSV format with a filename prefix of +``dic_results_`` followed by the name of the deformed image. + +**The output will have the following columns:** + +- ``subset_x``: + X-coordinate of the center of the subset (or window) used in displacement tracking or correlation analysis. + +- ``subset_y``: + Y-coordinate of the center of the subset used in displacement tracking or correlation analysis. + +- ``displacement_u``: + Displacement in the X-direction (horizontal) calculated for the subset. + +- ``displacement_v``: + Displacement in the Y-direction (vertical) calculated for the subset. + +- ``displacement_mag``: + Magnitude of the displacement vector. + +- ``converged``: + Boolean flag indicating whether the displacement calculation algorithm converged for this subset. + +- ``cost``: + The final value of the cost function used during the displacement calculation. 
+ + The reported value is always given as the ZNCC value no matter if the SSD, NSSD or ZNSSD has been chosen as the correlation function. + The ZNCC is calculated with the final parameter values from the last optimizer iteration. + +- ``ftol``: + The final value of the function tolerance, a measure of how much the cost function changed between iterations at convergence. + +- ``xtol``: + The final value of the solution tolerance, a measure of how much the solution (displacement) changed between iterations at convergence. + +- ``num_iterations``: + The number of iterations the algorithm took to converge for this subset. + +You can alter the path, delimiter and filename prefix of the output file using the arguments +:code:`output_basepath`, :code:`output_delimiter` and :code:`output_prefix`. You +can also opt to save results in binary format. This can be done by setting +:code:`output_binary=True`. + +Importing DIC Results +^^^^^^^^^^^^^^^^^^^^^ + +Once you have finished your correlation, you can proceed with any +visualization and post-processing tools/software. Pyvale does provide +the capability to read the DIC data using a single command into a +dataclass that can be utilized for simple plotting. Importing data is done with +the ``dic.import_2d`` command. The below highlights how to import data and +create a simple plot of the displacement: + +.. code-block:: Python + + import matplotlib.pyplot as plt + + dic_data = dic.import_2d(data="./dic_results_*") + + # plot of vertical displacement for first deformation image. + plt.pcolor(dic_data.ss_x, + dic_data.ss_y, + dic_data.u_y[0]) # [image, y, x] + +The import will find all files in the current working directory with that +filename prefix. If you have changed :code:`output_delimiter` prior to the +correlation you will also need to specify the delimiter when importing the data. + +Strain Calculation +^^^^^^^^^^^^^^^^^^^ + +The previous step is optional if you want to perform a strain +calculation. 
If you don't need to do any kind of visualization or analysis, you +can import DIC data and calculate strains using + + +.. tab-set:: + + .. tab-item:: Embedded DIC data Import + + .. code-block:: python + + strain.calculate_2d(data="./dic_results_*", + input_delimiter=",", + window_size=5, + window_element=9 + ) + + .. tab-item:: Prior DIC data Import + + .. code-block:: Python + + dic_data = dic.import_2d(data="./dic_results_*", delimiter=",") + + strain.calculate_2d( + data=dic_data, + window_size=5, + window_element=9 + ) + + +Understanding Strain Output Files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Just like the DIC output, the strain output files are saved +in the user's current working directory in human readable .CSV format with a filename prefix of +``strain_`` followed by the name of the deformed image. + +**The output will have the following columns:** + +* ``window_x``: X-coordinate of the strain window center. +* ``window_y``: Y-coordinate of the strain window center. +* ``def_grad_00``: Deformation gradient component, :math:`F_{00}`. +* ``def_grad_01``: Deformation gradient component, :math:`F_{01}`. +* ``def_grad_10``: Deformation gradient component, :math:`F_{10}`. +* ``def_grad_11``: Deformation gradient component, :math:`F_{11}`. +* ``eps_00``: Strain tensor component :math:`\varepsilon_{00}` (normal strain in x-direction). +* ``eps_01``: Strain tensor component :math:`\varepsilon_{01}` (shear strain xy). +* ``eps_10``: Strain tensor component :math:`\varepsilon_{10}` (shear strain yx). +* ``eps_11``: Strain tensor component :math:`\varepsilon_{11}` (normal strain in y-direction). + +You can alter the path, delimiter and filename prefix of the output file using the arguments +:code:`output_basepath`, :code:`output_delimiter` and :code:`output_prefix`. You +can also opt to save results in binary format. This can be done by setting +:code:`output_binary=True`. 
+ + +Importing Strain Data +^^^^^^^^^^^^^^^^^^^^^^^^ +Importing Strain data is done with the ``strain.import_2d`` command. +The below highlights how to import data and +create a simple plot of the normal strain in the x-direction: + +.. code-block:: Python + + import matplotlib.pyplot as plt + + strain_data = strain.import_2d(data="./strain_*") + + # plot of normal strain in the x-direction for first deformation image. + plt.pcolor(strain_data.window_x, + strain_data.window_y, + strain_data.eps_xx[0]) # [image, y, x] + +The import will find all files in the current working directory with that +filename prefix. If you have changed :code:`output_delimiter` prior to the +correlation you will also need to specify the delimiter when importing the data. + +DIC with Large Images/Displacements +------------------------------------ + +Setting a Maximum Displacement +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Typically if you are performing DIC with large displacements that will exceed +well above 100 pixels, then there will be a few arguments that you will want to +consider tweaking that might help to improve your results. + +Firstly, it's important to choose a max displacement value that is comfortably +larger than your estimate for the final maximum displacement. For example, if +you *think* your max displacement is roughly 300 pixels, then trying a value of +512 (powers of two help with FFT efficiency) would be a sensible option. This +can be done with: + +.. code-block:: Python + :emphasize-lines: 3 + + dic.calculate_2d( + ..., + max_displacement=512, + ..., + ) + +Pyvale will always round the `max_displacement` value up to the next greatest +power of 2. So if you select 400, it would still round to 512. Again, this is to +benefit from the efficiencies of FFTs. 
+ +Enabling Outlier Removal in Multiwindow FFTCC Initialization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The FFTCC multiwindowing approach involves seeding smaller windows with the +rigid estimates from neighbouring points in the previous larger window. This can +sometimes be problematic when multiple windows are involved as incorrect +estimates can be propagated through the smaller windows, leading to a wildly +incorrect initial rigid estimate for the displacements. + +Pyvale has a Median Absolute Deviation (MAD) outlier removal +flag that, when enabled, will kill likely incorrect spikes in the rigid +estimates for each FFTCC window size. This can be enabled with the following +arguments when calling the DIC engine: + +.. code-block:: Python + :emphasize-lines: 3-4 + + dic.calculate_2d( + ..., + fft_mad=True, + fft_mad_scale=3.0, # <-- Default value + ... + ) + +The MAD outlier removal works in the following way: + +#. Looks at nearby subsets in a 2D neighborhood +#. Computes the median of their shifts +#. Computes the MAD (median absolute deviation) +#. A value is replaced if the condition :math:`| x - \mathrm{median}| > \mathrm{fft\_mad\_scale} \times \mathrm{MAD}` is met. + A larger :code:`fft_mad_scale` is therefore more *tolerant*, while a smaller value is more aggressive at replacing deviations. + + +Sequential Image Loading +^^^^^^^^^^^^^^^^^^^^^^^^ + +When working with a series of large images, RAM usage starts to become an +important consideration. In its current form, Pyvale will read **all** images +in the workflow when it starts. This isn't a huge problem for typical DIC +workflows where images are typically 10s of MBs, but will start to cause crashes +with high resolution images (100s of MBs). To get around this we'd recommend +**placing the DIC engine call in a loop over the images**. An example of which can be found +below: + +.. 
code-block:: Python + + ref_img = "ref_00.tiff" + def_imgs = ["def_00.tiff", "def_01.tiff", "def_02.tiff", ...] + + for def_img in def_imgs: + + dic.calculate_2d( + ..., + reference=ref_img, + deformed=def_img, + ..., + ) + +There are plans to change this in later pyvale versions so that images +are read sequentially, thus avoiding the need for any loops. Please keep an +eye on the documentation for any future changes. + + +Selecting a Thread Count +------------------------------- + +Pyvale uses `OpenMP `_ for parallel calculations. +Users can select the number of threads used in the DIC calculation with the +:code:`num_threads` argument in :code:`dic.calculate_2d`: + + +.. code-block:: Python + :emphasize-lines: 3 + + dic.calculate_2d( + ..., + num_threads=8, # <-- number of threads to use + ..., + ) + +Alternatively, those on UNIX operating systems (macOS, Linux) can set the number +of threads using the :code:`OMP_NUM_THREADS` environment variable. +If you are using Windows PowerShell you can use the command :code:`$env:OMP_NUM_THREADS=8`. +It is worth noting that Pyvale will only use this value if it has not been +specified with the :code:`num_threads` function argument. 
diff --git a/docs/source/guide_user/guide_dic_flowchart.png b/docs/source/guide_user/guide_dic_flowchart.png new file mode 100644 index 000000000..cbfc7c4d9 Binary files /dev/null and b/docs/source/guide_user/guide_dic_flowchart.png differ diff --git a/docs/source/guide_user/guide_dic_interactive.png b/docs/source/guide_user/guide_dic_interactive.png new file mode 100644 index 000000000..afed237a5 Binary files /dev/null and b/docs/source/guide_user/guide_dic_interactive.png differ diff --git a/docs/source/guide_user/guide_dic_interactive.yaml b/docs/source/guide_user/guide_dic_interactive.yaml new file mode 100644 index 000000000..17d14ff01 --- /dev/null +++ b/docs/source/guide_user/guide_dic_interactive.yaml @@ -0,0 +1,45 @@ +- type: RectROI + pos: + - 34 + - 37 + size: + - 433 + - 588 + add: true +- type: RectROI + pos: + - 322 + - 53 + size: + - 123 + - 225 + add: false +- type: CircleROI + pos: + - 371 + - 536 + size: + - 133 + - 133 + add: false +- type: PolyLineROI + points: + - - 158 + - 422 + - - 85 + - 482 + - - 88 + - 584 + - - 222 + - 586 + - - 227 + - 480 + add: false +- type: SeedROI + pos: + - 108 + - 145 + size: + - 41 + - 41 + add: true diff --git a/docs/source/guide/guide_sensorsim.rst b/docs/source/guide_user/guide_sensorsim.rst similarity index 99% rename from docs/source/guide/guide_sensorsim.rst rename to docs/source/guide_user/guide_sensorsim.rst index 9a513f513..9cb3ca4bf 100644 --- a/docs/source/guide/guide_sensorsim.rst +++ b/docs/source/guide_user/guide_sensorsim.rst @@ -1,7 +1,7 @@ .. _guide_sensorsim: -Sensor Simulation Guide -======================= +Sensor Simulation User Guide +============================= The sensor simulation module is pyvale is used for modelling sensors including sources of uncertainty with a particular focus on systematic an Type B measurement errors. 
Systematic and Type B measurement errors are the most difficult to characterise in practice and often contribute the most to the overall uncertainty of a given measurement. diff --git a/docs/source/guide_user/guide_strain.rst b/docs/source/guide_user/guide_strain.rst new file mode 100644 index 000000000..e69de29bb diff --git a/docs/source/guide/guide.rst b/docs/source/guide_user/guide_user.rst similarity index 98% rename from docs/source/guide/guide.rst rename to docs/source/guide_user/guide_user.rst index 43853abe3..bf406ce77 100644 --- a/docs/source/guide/guide.rst +++ b/docs/source/guide_user/guide_user.rst @@ -1,10 +1,10 @@ -.. _guide_overview: +.. _guide_overview_user: User Guide ================================= -Module user guides ------------------- +Module Specific Guides +----------------------- .. toctree:: :maxdepth: 1 diff --git a/docs/source/index.rst b/docs/source/index.rst index 81bb77a45..a49ad591a 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -11,27 +11,127 @@ Pyvale: The Python Validation Engine ==================================== -Pyvale is your virtual engineering laboratory: An all-in-one package for sensor uncertainty quantification simulation, experimental design, sensor placement optimisation and simulation calibration/validation. + +.. |pypi| image:: https://img.shields.io/pypi/v/pyvale + :target: https://pypi.org/project/pyvale/ + :alt: PyPI version + +.. |github| image:: https://img.shields.io/badge/github-repo-blue?logo=github + :target: https://github.com/Computer-Aided-Validation-Laboratory/pyvale + :alt: GitHub repository + +.. |wheels| image:: https://img.shields.io/github/actions/workflow/status/Computer-Aided-Validation-Laboratory/pyvale/wheels.yml?branch=main&label=Build + :target: https://github.com/Computer-Aided-Validation-Laboratory/pyvale/actions/workflows/wheels.yml + :alt: Wheels Build + +.. 
|tests| image:: https://img.shields.io/github/actions/workflow/status/Computer-Aided-Validation-Laboratory/pyvale/tests.yml?branch=main&label=Tests + :target: https://github.com/Computer-Aided-Validation-Laboratory/pyvale/actions/workflows/tests.yml + :alt: Run tests + +.. |issues| image:: https://img.shields.io/github/issues/Computer-Aided-Validation-Laboratory/pyvale + :target: https://github.com/Computer-Aided-Validation-Laboratory/pyvale/issues + :alt: Open GitHub issues + +.. |PR| image:: https://img.shields.io/github/issues-pr/Computer-Aided-Validation-Laboratory/pyvale + :target: https://github.com/Computer-Aided-Validation-Laboratory/pyvale/issues + :alt: Open GitHub Pull Requests + +.. |license| image:: https://img.shields.io/github/license/Computer-Aided-Validation-Laboratory/pyvale + :target: https://choosealicense.com/licenses/mit/ + :alt: MIT License + +|pypi| |github| |wheels| |tests| |issues| |PR| |license| + + +Pyvale aims to become an all-in-one package for sensor uncertainty quantification simulation, experimental design, sensor placement optimisation and simulation calibration/validation. Used to simulate experimental data from an input multi-physics simulation by explicitly modelling sensors with realistic uncertainties. -Useful for experimental data analysis (especially for imaging sensors such as digital image correlation and infra-red thermography), experimental design, sensor placement optimisation, testing simulation validation metrics and virtually testing digital shadows/twins. +We are actively developing dedicated tools for simulation and uncertainty quantification of imaging sensors including digital image correlation (DIC) and infra-red thermography (IRT). + +Getting Started +--------------------------------- + +.. grid:: 2 + :gutter: 3 + + .. grid-item-card:: Installation + :link: install/install + :link-type: doc + :text-align: center + :shadow: lg + + :octicon:`gear;5em` + + .. 
grid-item-card:: Examples + :link: examples/examples + :link-type: doc + :text-align: center + :shadow: lg + + :octicon:`file-code;5em` + + .. grid-item-card:: User Guide + :link: guide_user/guide_user + :link-type: doc + :text-align: center + :shadow: lg -**Current Release:** |release| + :octicon:`workflow;5em` + + .. grid-item-card:: Theory Overview + :link: guide_theory/guide_theory + :link-type: doc + :text-align: center + :shadow: lg + + :octicon:`book;5em` -**Useful links:** | `Source Repository `_ | `Issues `_ | `PyPI `_ | `MIT License `_ | .. toctree:: - :maxdepth: 1 - :caption: Contents: - - guide/guide - install/install - examples/examples - api_py - api_cpp - cite - -Contributors ------------- + :hidden: + + install/install + examples/examples + guide_user/guide_user + guide_theory/guide_theory + api_py + api_cpp + cite + + +Citing Pyvale +--------------- + +If you use the code in your published work, then please cite the following article: + +.. tab-set:: + + .. tab-item:: MLA + + Hirst, Joel, et al. "PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation + (DIC) Engine Capable of Handling Gigapixel Images." + *arXiv preprint arXiv:2601.12941* (2026). + + .. tab-item:: APA + + Hirst, J., Sibson, L., Tayeb, A., Poole, B., Sampson, M., Bielajewa, W., ... & Fletcher, L. (2026). + PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation (DIC) Engine + Capable of Handling Gigapixel Images. + *arXiv preprint arXiv:2601.12941*. + + .. tab-item:: Bibtex + + .. 
code-block:: + + @article{pyvale2026, + title={PYVALE: A Fast, Scalable, Open-Source 2D Digital Image Correlation (DIC) Engine Capable of Handling Gigapixel Images}, + author={Hirst, Joel and Sibson, Lorna and Tayeb, Adel and Poole, Ben and Sampson, Megan and Bielajewa, Wiera and Atkinson, Michael and Marsh, Alex and Spencer, Rory and Hamill, Rob and others}, + journal={arXiv preprint arXiv:2601.12941}, + year={2026} + } + + +Key Contributors +----------------- The Computer Aided Validation Team at United Kingdom Atomic Energy Authority (UKAEA): diff --git a/docs/source/install/install.rst b/docs/source/install/install.rst index 07e152cb5..254a9656a 100644 --- a/docs/source/install/install.rst +++ b/docs/source/install/install.rst @@ -2,7 +2,51 @@ Installation =================== -We have detailed install guides for non-specialist python users for the common operating systems below. This includes walkthroughs on how to install the correct python version for your operating system and how to setup a virtual environment to install ``pyvale``. + +If you have **Python 3.11** installed you can setup ``pyvale`` using your preferred package manager: + +.. tab-set:: + + .. tab-item:: pip + + .. code-block:: bash + + # Create a virtual environment with Python 3.11 + python3.11 -m venv venv-pyvale + + # Activate the environment + source venv-pyvale/bin/activate # On Windows: .\venv-pyvale\Scripts\Activate.ps1 + + # Install pyvale + pip install pyvale + + .. tab-item:: uv + + .. code-block:: bash + + # Initialize a new project with Python 3.11 + uv init --python 3.11 try-pyvale + cd try-pyvale + + # Add pyvale as a dependency + uv add pyvale + + + .. tab-item:: conda + + .. 
code-block:: bash + + # Create a conda environment with Python 3.11 + conda create -n pyvale-env python=3.11 + + # Activate the environment + conda activate pyvale-env + + # Install pyvale + pip install pyvale + +**We have detailed install guides for non-specialist python users for the common operating systems below. +This includes walkthroughs on how to install the correct python version for your operating system and how to setup a virtual environment.** .. toctree:: :maxdepth: 1 @@ -10,10 +54,3 @@ We have detailed install guides for non-specialist python users for the common o install_linux.rst install_mac.rst install_windows.rst - -If you are already familiar with setting up and using virtual environments in python then all you need to do is create a python 3.11 environment and install ``pyvale`` from PyPI using: - -.. code-block:: bash - - pip install pyvale - diff --git a/docs/source/install/install_mac.rst b/docs/source/install/install_mac.rst index 096a719df..9b256282c 100644 --- a/docs/source/install/install_mac.rst +++ b/docs/source/install/install_mac.rst @@ -3,13 +3,14 @@ MacOS ###### -Configuring Python3.11 +Configuring Python 3.11 *********************** + To be compatible with ``bpy`` (the Blender python interface), ``pyvale`` requires python 3.11. Homebrew is a free and open-source commonly used package manager for macOS that often simplifies the process of installing, updating, and managing software. Install via Homebrew -========= +===================== Simple installation instructions for Homebrew itself be found at ``_. After you have homebrew setup, you can install python3.11 with: @@ -55,7 +56,7 @@ This will create a virtual environment called 'pyvale-env' in a folder of the sa If this has worked you should see '(pyvale-env)' at the start of your command prompt line showing the environmen is activated. 
If you ever need to activate the environment in a new command prompt you just need to run the 'activate' script again from the folder you are currently in. Virtual Environments in VSCode ------------------------------- +=============================== To use you virtual environment in VSCode to run some of the examples you will need to make sure you have selected your virtual environment as your python interpreter. To do this first open the folder that you want to work from which should be the same folde that contains your virtual environment folder (that is the pyvale-env folder). @@ -93,7 +94,7 @@ Installation from Source This will only be needed if you want an editable installation of ``pyvale`` for most applications users will want to use the PyPI version above. Dependencies -^^^^^^^^^^^^ +============ Apple has disabled OpenMP for the default C/C++ compilers shipped with Xcode. Therefore, it's reccomended you install either ``gcc`` OR``llvm`` AND ``libomp`` using the homebrew (`https://brew.sh/`_) package manager. @@ -131,7 +132,7 @@ used during the build process: export CXX=/opt/homebrew/opt/llvm/bin/clang++ Clone and Install the Github Repository -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +======================================= Clone ``pyvale`` to your local system using git along with submodules using: diff --git a/images/hrdic.png b/images/hrdic.png new file mode 100644 index 000000000..829427618 Binary files /dev/null and b/images/hrdic.png differ diff --git a/pyproject.toml b/pyproject.toml index 19ae7a009..49b985449 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "scikit_build_core.build" [project] name = "pyvale" -version = "2026.1.3" +version = "2026.2.0" description = "Your virtual engineering lab: An all-in-one package for sensor simulation, uncertainty quantification, sensor placement optimisation and simulation calibration or validation." 
authors = [ { name = "scepticalrabbit et al.", email = "thescepticalrabbit@gmail.com" }, diff --git a/src/pyvale/blender/blenderlightdata.py b/src/pyvale/blender/blenderlightdata.py index 7c11ff834..a9376ba43 100644 --- a/src/pyvale/blender/blenderlightdata.py +++ b/src/pyvale/blender/blenderlightdata.py @@ -11,6 +11,21 @@ #TODO: docstrings class LightType(Enum): + """ + Enumeration of available light types in Blender. + + Attributes + ---------- + POINT : str + Omnidirectional point light source + SUN : str + Directional sunlight (parallel rays) + SPOT : str + Cone-shaped spotlight + AREA : str + Area light with defined shape + """ + POINT = 'POINT' SUN = 'SUN' SPOT = 'SPOT' @@ -18,9 +33,25 @@ class LightType(Enum): @dataclass(slots=True) class LightData(): + """ + Configuration data for a light source in Blender. + + This dataclass stores the position, orientation, intensity, and type + information for scene lighting. + """ + pos_world: np.ndarray + """Position of the light in world coordinates (x, y, z)""" + rot_world: Rotation - energy: int # NOTE: In Watts + """Rotation of the light in world space (scipy Rotation object)""" + + energy: int + """Light intensity in Watts""" + type: LightType = LightType.POINT + """Type of light source (default: POINT)""" + shadow_soft_size: float = 1.5 + """Size of the light for soft shadow calculation (default: 1.5)""" diff --git a/src/pyvale/dataset/dataset.py b/src/pyvale/dataset/dataset.py index da050ec82..60eec90b2 100644 --- a/src/pyvale/dataset/dataset.py +++ b/src/pyvale/dataset/dataset.py @@ -156,7 +156,7 @@ def thermal_2d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data").joinpath("case18_out.e")) @@ -179,7 +179,7 @@ def thermal_3d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. 
+ Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data").joinpath("case16_out.e")) @@ -198,7 +198,7 @@ def mechanical_2d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data").joinpath("case17_out.e")) @@ -218,7 +218,7 @@ def thermomechanical_2d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data").joinpath("case18_out.e")) @@ -239,7 +239,7 @@ def thermomechanical_3d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data").joinpath("case16_out.e")) @@ -262,7 +262,7 @@ def thermomechanical_2d_experiment_paths() -> list[Path]: Returns ------- list[Path] - Paths to the exodus (*.e) output files for this simulated experiment. + Paths to the exodus (``*.e``) output files for this simulated experiment. """ return [Path(files("pyvale.data").joinpath("case18_out.e")), Path(files("pyvale.data").joinpath("case18_d_out.e"))] @@ -287,7 +287,7 @@ def thermomechanical_3d_experiment_paths() -> list[Path]: Returns ------- list[Path] - Paths to the exodus (*.e) output files for this simulated experiment. + Paths to the exodus (``*.e``) output files for this simulated experiment. """ return [Path(files("pyvale.data").joinpath("case16_out.e")), @@ -304,7 +304,7 @@ def render_mechanical_3d_path() -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. 
""" return Path(files("pyvale.data").joinpath("case26_out.e")) @@ -348,14 +348,15 @@ def element_case_output_path(elem_type: EElemTest) -> Path: Returns ------- Path - Path to the exodus (*.e) output file for this simulation case. + Path to the exodus (``*.e``) output file for this simulation case. """ return Path(files("pyvale.data") .joinpath(f"case00_{elem_type.value}_out.e")) def dic_plate_with_hole_ref() -> Path: - """Path to the reference image for the plate with hole example. + """ + Path to the reference image for the plate with hole example. 1040x1540 image in .tiff format. Parameters @@ -366,14 +367,15 @@ def dic_plate_with_hole_ref() -> Path: Returns ------- Path - Path to the reference image (*.tiff). + Path to the reference image (``.tiff``). """ return Path(files("pyvale.data") .joinpath("plate_hole_ref0000.tiff")) def dic_plate_with_hole_def() -> Path: - """Path to the deformed images for the plate with hole example. + """ + Path to the deformed images for the plate with hole example. 1040x1540 image in .tiff format. Parameters @@ -384,14 +386,15 @@ def dic_plate_with_hole_def() -> Path: Returns ------- Path - Path to the reference image (*.tiff). + Path to the reference image (``.tiff``). """ return Path(files("pyvale.data") .joinpath("plate_hole_def*.tiff")) def dic_plate_rigid_ref() -> Path: - """Path to the reference image for the rigid deformation example. + """ + Path to the reference image for the rigid deformation example. 1040x1540 image in .tiff format. Parameters @@ -402,79 +405,85 @@ def dic_plate_rigid_ref() -> Path: Returns ------- Path - Path to the reference image (*.tiff). + Path to the reference image (``.tiff``). """ return Path(files("pyvale.data") .joinpath("plate_rigid_ref0000.tiff")) def dic_plate_rigid_def() -> Path: - """Path to the rigid deformation example images. + """ + Path to the rigid deformation example images. 1040x1540 image in .tiff format. Returns ------- Path - Path to the deformation images (*.tiff). 
+ Path to the deformation images (``.tiff``). """ return Path(files("pyvale.data") .joinpath("plate_rigid_def0*.tiff")) def dic_plate_rigid_def_25px() -> Path: - """Path to the 25px rigid deformation image. + """ + Path to the 25px rigid deformation image. 1040x1540 image in .tiff format. Returns ------- Path - Path to the 25 px deformed image (*.tiff). + Path to the 25 px deformed image (``.tiff``). """ return Path(files("pyvale.data").joinpath("plate_rigid_def_25px.tiff")) def dic_plate_rigid_def_50px() -> Path: - """Path to the 50px rigid deformation image. + """ + Path to the 50px rigid deformation image. 1040x1540 image in .tiff format. Returns ------- Path - Path to the 50px deformed image (*.tiff). + Path to the 50px deformed image (``.tiff``). """ return Path(files("pyvale.data").joinpath("plate_rigid_def_50px.tiff")) def dic_challenge_ref() -> Path: - """Path to the reference images for the 2D DIC challenge. + """ + Path to the reference images for the 2D DIC challenge. Returns ------- Path - Path to the reference image (*.tiff). + Path to the reference image (``.tiff``). """ return Path(files("pyvale.data") .joinpath("DIC_Challenge_Star_Noise_Ref.tiff")) def dic_challenge_def() -> Path: - """Path to the reference images for the 2D DIC challenge. + """ + Path to the reference images for the 2D DIC challenge. Returns ------- Path - Path to the deformed image (*.tiff). + Path to the deformed image (``.tiff``). """ return Path(files("pyvale.data") .joinpath("DIC_Challenge_Star_Noise_Def.tiff")) def cal_target() -> Path: - """Path to example calibration target. + """ + Path to example calibration target. Returns ------- Path - Path to the image (*.tiff). + Path to the image (``.tiff``). 
""" return Path(files("pyvale.data") .joinpath("cal_target.tiff")) diff --git a/src/pyvale/dic/cpp/dicfourier.cpp b/src/pyvale/dic/cpp/dicfourier.cpp index 267b67c2b..e481b5785 100644 --- a/src/pyvale/dic/cpp/dicfourier.cpp +++ b/src/pyvale/dic/cpp/dicfourier.cpp @@ -23,7 +23,7 @@ // DIC Header files #include "dicfourier.hpp" #include "dicsubset.hpp" -#include "dicinterpolator.hpp" +#include "dicinterp.hpp" namespace fourier { diff --git a/src/pyvale/dic/cpp/dicfourier.hpp b/src/pyvale/dic/cpp/dicfourier.hpp index 06c0e0627..2110e9544 100644 --- a/src/pyvale/dic/cpp/dicfourier.hpp +++ b/src/pyvale/dic/cpp/dicfourier.hpp @@ -19,7 +19,7 @@ #include // DIC Header files -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" #include "./dicsubset.hpp" #include "./dicutil.hpp" diff --git a/src/pyvale/dic/cpp/dicinterp.hpp b/src/pyvale/dic/cpp/dicinterp.hpp new file mode 100644 index 000000000..ea6e833ec --- /dev/null +++ b/src/pyvale/dic/cpp/dicinterp.hpp @@ -0,0 +1,97 @@ +// ================================================================================ +// pyvale: the python validation engine +// License: MIT +// Copyright (C) 2025 The Computer Aided Validation Team +// ================================================================================ + +#ifndef DICINTERP_H +#define DICINTERP_H + +// STD library Header files + +// Program Header files + + + + +inline int idx_from_2d(const int x, const int y, const int length){ + return y*length+x; +} + + + +/** + * @brief namespace for bicubic spline interpolation. + * + * Based on the implementation by GNU Scientific Library (GSL). + * Main difference is the removal of the binary search for index lookup. + * For use in DIC, we only ever need integer locations and therefore its + * sufficient to get the floor value of the subpixel location. 
+ * + */ +struct InterpVals { + double f; + double dfdx; + double dfdy; +}; + +class Interpolator { +public: + + int px_vert; + int px_hori; + + /** + * @brief Evaluates the bicubic interpolation at a specified point. + * + * Computes the interpolated value at (x,y) using bicubic interpolation from the surrounding pixel values. + * + * @param x The x-coordinate of the interpolation point + * @param y The y-coordinate of the interpolation point + * @return The interpolated value at (x,y) + */ + virtual double eval(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const = 0; + + /** + * @brief Evaluates the x-derivative of bicubic interpolation at a specified point. + * + * Computes the partial derivative with respect to x at point (x,y). + * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return The x-derivative of the interpolated function at (x,y) + */ + virtual double eval_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const = 0; + + /** + * @brief Evaluates the y-derivative of bicubic interpolation at a specified point. + * + * Computes the partial derivative with respect to y at point (x,y). + * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return The y-derivative of the interpolated function at (x,y) + */ + virtual double eval_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const = 0; + + /** + * @brief Evaluates the bicubic interpolation and its derivatives at a specified point. + * + * Computes the interpolated value and its partial derivatives at (x,y) in a single call. 
+ * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return Data struct containing the interpolated value and its x and y derivatives + */ + virtual InterpVals eval_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const = 0; + + + virtual ~Interpolator() = default; + +}; + +#endif //DICINTERP_H + + + + diff --git a/src/pyvale/dic/cpp/dicinterpBspline.cpp b/src/pyvale/dic/cpp/dicinterpBspline.cpp new file mode 100644 index 000000000..472fab6f4 --- /dev/null +++ b/src/pyvale/dic/cpp/dicinterpBspline.cpp @@ -0,0 +1,201 @@ +// ================================================================================ +// pyvale: the python validation engine +// License: MIT +// Copyright (C) 2025 The Computer Aided Validation Team +// ================================================================================ + + + +#include +#include +#include + +#include "./dicinterpBspline.hpp" + +Bspline::Bspline(double* img, int px_hori, int px_vert){ + + // intitialise vars used globally within Interpolator. 
+ this->image = img; + this->px_vert = px_vert; + this->px_hori = px_hori; + coeff.resize(px_vert*px_hori); + + for (int i = 0; i < px_hori*px_vert; i++){ + coeff[i] = img[i]; + } + + prefilter_x(); + prefilter_y(); +} + +// 1D cubic B-spline basis and derivatives +inline void Bspline::basis(double t, double B[4]) { + double tt = t*t, ttt = tt*t; + + B[0] = (1.0 - 3.0*t + 3.0*tt - ttt) / 6.0; + B[1] = (4.0 - 6.0*tt + 3.0*ttt) / 6.0; + B[2] = (1.0 + 3.0*t + 3.0*tt - 3.0*ttt) / 6.0; + B[3] = ttt / 6.0; +} + +inline void Bspline::basis_d(double t, double Bd[4]) { + double tt = t*t; + + Bd[0] = (-3.0 + 6.0*t - 3.0*tt) / 6.0; + Bd[1] = (-12.0*t + 9.0*tt) / 6.0; + Bd[2] = (3.0 + 6.0*t - 9.0*tt) / 6.0; + Bd[3] = (3.0*tt) / 6.0; +} + + +void Bspline::prefilter_x() { + + const double z = std::sqrt(3.0) - 2.0; + const double lambda = (1.0 - z)*(1.0 - 1.0/z); + + // Normalize + for (int y = 0; y < px_vert; y++) + for (int x = 0; x < px_hori; x++) + coeff[y*px_hori + x] *= lambda; + + // Causal + for (int y = 0; y < px_vert; y++) { + double* row = &coeff[y*px_hori]; + for (int x = 1; x < px_hori; x++) + row[x] += z * row[x-1]; + } + + // Anticausal + for (int y = 0; y < px_vert; y++) { + double* row = &coeff[y*px_hori]; + row[px_hori-1] = z/(z*z - 1.0) * row[px_hori-1]; + for (int x = px_hori-2; x >= 0; x--) + row[x] = z*(row[x+1] - row[x]); + } +} + +// ------------------------------------------------------- +// Prefilter along each column +// ------------------------------------------------------- +void Bspline::prefilter_y() { + const double z = std::sqrt(3.0) - 2.0; + const double lambda = (1.0 - z)*(1.0 - 1.0/z); + + // Normalize + for (int y = 0; y < px_vert; y++) + for (int x = 0; x < px_hori; x++) + coeff[y*px_hori + x] *= lambda; + + // Causal + for (int x = 0; x < px_hori; x++) { + for (int y = 1; y < px_vert; y++) + coeff[y*px_hori + x] += z * coeff[(y-1)*px_hori + x]; + } + + // Anticausal + for (int x = 0; x < px_hori; x++) { + coeff[(px_vert-1)*px_hori + x] = 
z/(z*z - 1.0) * coeff[(px_vert-1)*px_hori + x]; + for (int y = px_vert-2; y >= 0; y--) + coeff[y*px_hori + x] = z*(coeff[(y+1)*px_hori + x] - coeff[y*px_hori + x]); + } +} + + +double Bspline::eval(const int ss_x, const int ss_y, const double subpx_x, double subpx_y) const { + int ix = (int)floor(subpx_x); + int iy = (int)floor(subpx_y); + + double tx = subpx_x - ix; + double ty = subpx_y - iy; + + double Bx[4], By[4]; + basis(tx, Bx); + basis(ty, By); + + double f = 0.0; + for (int j = 0; j < 4; j++) { + int yy = std::clamp(iy + j - 1, 0, px_vert-1); + + for (int i = 0; i < 4; i++) { + int xx = std::clamp(ix + i - 1, 0, px_hori-1); + double c = coeff[yy*px_hori + xx]; + f += c * Bx[i] * By[j]; + } + } + return f; +} + +double Bspline::eval_dx(const int ss_x, const int ss_y, const double subpx_x, double subpx_y) const { + int ix = (int)floor(subpx_x); + int iy = (int)floor(subpx_y); + + double tx = subpx_x - ix; + double ty = subpx_y - iy; + + double By[4], dBx[4]; + basis(ty, By); + basis_d(tx, dBx); + + double dfdx = 0.0; + for (int j = 0; j < 4; j++) { + int yy = std::clamp(iy + j - 1, 0, px_vert-1); + for (int i = 0; i < 4; i++) { + int xx = std::clamp(ix + i - 1, 0, px_hori-1); + double c = coeff[yy*px_hori + xx]; + dfdx += c * dBx[i] * By[j]; + } + } + return dfdx; +} + + +double Bspline::eval_dy(const int ss_x, const int ss_y, const double subpx_x, double subpx_y) const { + int ix = (int)floor(subpx_x); + int iy = (int)floor(subpx_y); + + double tx = subpx_x - ix; + double ty = subpx_y - iy; + + double Bx[4], dBy[4]; + basis(tx, Bx); + basis_d(ty, dBy); + + double dfdy = 0.0; + for (int j = 0; j < 4; j++) { + int yy = std::clamp(iy + j - 1, 0, px_vert-1); + for (int i = 0; i < 4; i++) { + int xx = std::clamp(ix + i - 1, 0, px_hori-1); + double c = coeff[yy*px_hori + xx]; + dfdy += c * Bx[i] * dBy[j]; + } + } + return dfdy; +} + +InterpVals Bspline::eval_and_derivs(const int ss_x, const int ss_y, const double subpx_x, double subpx_y) const { + int ix = 
(int)floor(subpx_x); + int iy = (int)floor(subpx_y); + + double tx = subpx_x - ix; + double ty = subpx_y - iy; + + double Bx[4], By[4], dBx[4], dBy[4]; + basis(tx, Bx); + basis(ty, By); + basis_d(tx, dBx); + basis_d(ty, dBy); + + InterpVals out {0,0,0}; + + for (int j = 0; j < 4; j++) { + int yy = std::clamp(iy + j - 1, 0, px_vert-1); + for (int i = 0; i < 4; i++) { + int xx = std::clamp(ix + i - 1, 0, px_hori-1); + double c = coeff[yy*px_hori + xx]; + out.f += c * Bx[i] * By[j]; + out.dfdx += c * dBx[i] * By[j]; + out.dfdy += c * Bx[i] * dBy[j]; + } + } + return out; +} diff --git a/src/pyvale/dic/cpp/dicinterpBspline.hpp b/src/pyvale/dic/cpp/dicinterpBspline.hpp new file mode 100644 index 000000000..70a5fbebe --- /dev/null +++ b/src/pyvale/dic/cpp/dicinterpBspline.hpp @@ -0,0 +1,96 @@ +// ================================================================================ +// pyvale: the python validation engine +// License: MIT +// Copyright (C) 2025 The Computer Aided Validation Team +// ================================================================================ + + + + +#ifndef DICINTERPBSPLINE_H +#define DICINTERPBSPLINE_H + + +// STD library Header files +#include +#include + +// Program Header files +#include "./dicinterp.hpp" + + +class Bspline : public Interpolator { + +private: + + std::vector coeff; + double *image; + + // Recursive spline prefilter + void prefilter_x(); + void prefilter_y(); + + // 1D cubic B-spline basis and derivatives + static inline void basis(double t, double B[4]); + static inline void basis_d(double t, double Bd[4]); + +public: + + /** + * @brief Initializes the bicubic interpolator with deformed image data. + * + * Sets up the necessary data structures and computes derivatives required for bicubic interpolation. 
+ * + * @param img Pointer to the image data array + * @param px_hori Width of the image in pixels + * @param px_vert Height of the image in pixels + */ + Bspline(double * img, int px_hori, int px_vert); + + /** + * @brief Evaluates the bicubic interpolation at a specified point. + * + * Computes the interpolated value at (x,y) using bicubic interpolation from the surrounding pixel values. + * + * @param x The x-coordinate of the interpolation point + * @param y The y-coordinate of the interpolation point + * @return The interpolated value at (x,y) + */ + double eval(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; + + /** + * @brief Evaluates the x-derivative of bicubic interpolation at a specified point. + * + * Computes the partial derivative with respect to x at point (x,y). + * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return The x-derivative of the interpolated function at (x,y) + */ + double eval_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; + + /** + * @brief Evaluates the y-derivative of bicubic interpolation at a specified point. + * + * Computes the partial derivative with respect to y at point (x,y). + * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return The y-derivative of the interpolated function at (x,y) + */ + double eval_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; + + /** + * @brief Evaluates the bicubic interpolation and its derivatives at a specified point. + * + * Computes the interpolated value and its partial derivatives at (x,y) in a single call. 
+ * + * @param x The x-coordinate of the point + * @param y The y-coordinate of the point + * @return Data struct containing the interpolated value and its x and y derivatives + */ + InterpVals eval_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; + +}; + +#endif //DICINTERPBSPLINE_H diff --git a/src/pyvale/dic/cpp/dicinterpolator.cpp b/src/pyvale/dic/cpp/dicinterpHermite.cpp similarity index 94% rename from src/pyvale/dic/cpp/dicinterpolator.cpp rename to src/pyvale/dic/cpp/dicinterpHermite.cpp index bf79d6b12..2a1b56e2b 100644 --- a/src/pyvale/dic/cpp/dicinterpolator.cpp +++ b/src/pyvale/dic/cpp/dicinterpHermite.cpp @@ -17,16 +17,11 @@ #include "../../common_cpp/dicsignalhandler.hpp" // DIC Header files -#include "./dicinterpolator.hpp" - - -inline int idx_from_2d(const int x, const int y, const int length){ - return y*length+x; -} +#include "./dicinterpHermite.hpp" -Interpolator::Interpolator(double*img, int px_hori, int px_vert){ +Hermite::Hermite(double *img, int px_hori, int px_vert){ //Timer timer("interpolator initialisation"); @@ -171,7 +166,7 @@ Interpolator::Interpolator(double*img, int px_hori, int px_vert){ } } -double Interpolator::eval_bicubic(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const { +double Hermite::eval(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const { // get indices size_t xi,yi; @@ -241,7 +236,7 @@ double Interpolator::eval_bicubic(const int ss_x, const int ss_y, const double s -double Interpolator::eval_bicubic_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ +double Hermite::eval_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ /* first compute the indices into the data arrays where we are interpolating */ size_t xi,yi; @@ -297,7 +292,7 @@ double Interpolator::eval_bicubic_dx(const int ss_x, const int ss_y, const doubl } -double 
Interpolator::eval_bicubic_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ +double Hermite::eval_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ size_t xi,yi; index_lookup_xy(ss_x, ss_y, xi, yi, subpx_x, subpx_y); @@ -353,7 +348,7 @@ double Interpolator::eval_bicubic_dy(const int ss_x, const int ss_y, const doubl } -InterpVals Interpolator::eval_bicubic_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ +InterpVals Hermite::eval_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const{ // pixel floor of x and y size_t xi,yi; @@ -446,7 +441,7 @@ InterpVals Interpolator::eval_bicubic_and_derivs(const int ss_x, const int ss_y, -inline void Interpolator::coeff_calc(std::vector &tridiag_solution, double dy, double dx, size_t i, double *b, double *c, double *d) { +inline void Hermite::coeff_calc(std::vector &tridiag_solution, double dy, double dx, size_t i, double *b, double *c, double *d) { const double s_i = tridiag_solution[i]; const double s_ip1 = tridiag_solution[i + 1]; @@ -458,7 +453,7 @@ inline void Interpolator::coeff_calc(std::vector &tridiag_solution, doub } -inline void Interpolator::index_lookup_xy(const int ss_x, const int ss_y, size_t &xi, size_t &yi, const double subpx_x, const double subpx_y) const { +inline void Hermite::index_lookup_xy(const int ss_x, const int ss_y, size_t &xi, size_t &yi, const double subpx_x, const double subpx_y) const { if (subpx_x < px_x[0]) xi = 0; @@ -498,7 +493,7 @@ inline void Interpolator::index_lookup_xy(const int ss_x, const int ss_y, size_t } -inline int Interpolator::index_lookup(const std::vector &px, double x) const { +inline int Hermite::index_lookup(const std::vector &px, double x) const { // Clamp coordinates to valid range // double clamped_x = std::max(static_cast(index_lo), std::min(static_cast(index_hi), x)); @@ -526,7 +521,7 @@ inline int 
Interpolator::index_lookup(const std::vector &px, double x) c -void Interpolator::cspline_init(const std::vector &px, const std::vector &data, +void Hermite::cspline_init(const std::vector &px, const std::vector &data, std::vector &tridiag_solution){ @@ -598,7 +593,7 @@ void Interpolator::cspline_init(const std::vector &px, const std::vector } } -double Interpolator::cspline_eval_deriv(std::vector &px, std::vector &data, +double Hermite::cspline_eval_deriv(std::vector &px, std::vector &data, std::vector &local_tridiag_sol, double value, int length) { // Find the interval containing the evaluation point diff --git a/src/pyvale/dic/cpp/dicinterpolator.hpp b/src/pyvale/dic/cpp/dicinterpHermite.hpp similarity index 81% rename from src/pyvale/dic/cpp/dicinterpolator.hpp rename to src/pyvale/dic/cpp/dicinterpHermite.hpp index 3b1009d7c..fe208d02f 100644 --- a/src/pyvale/dic/cpp/dicinterpolator.hpp +++ b/src/pyvale/dic/cpp/dicinterpHermite.hpp @@ -7,8 +7,8 @@ -#ifndef DICINTERPOLATOR_H -#define DICINTERPOLATOR_H +#ifndef DICINTERPHERMITE_H +#define DICINTERPHERMITE_H @@ -16,24 +16,19 @@ #include // Program Header files -/** - * @brief namespace for bicubic spline interpolation. - * - * Based on the implementation by GNU Scientific Library (GSL). - * Main difference is the removal of the binary search for index lookup. - * For use in DIC, we only ever need integer locations and therefore its - * sufficient to get the floor value of the subpixel location. - * - */ -struct InterpVals { - double f; - double dfdx; - double dfdy; -}; +#include "./dicinterp.hpp" + +class Hermite : public Interpolator { -class Interpolator { +private: + + std::vector dx; + std::vector dy; + std::vector dxy; + std::vector px_y; + std::vector px_x; + double *image; -private: /** * @brief Calculates the coefficients for cubic spline interpolation. 
@@ -90,14 +85,6 @@ class Interpolator { public: - std::vector dx; - std::vector dy; - std::vector dxy; - std::vector px_y; - std::vector px_x; - double *image; - int px_vert; - int px_hori; /** * @brief Initializes the bicubic interpolator with deformed image data. @@ -108,7 +95,7 @@ class Interpolator { * @param px_hori Width of the image in pixels * @param px_vert Height of the image in pixels */ - Interpolator(double * img, int px_hori, int px_vert); + Hermite(double * img, int px_hori, int px_vert); /** * @brief Evaluates the bicubic interpolation at a specified point. @@ -119,7 +106,7 @@ class Interpolator { * @param y The y-coordinate of the interpolation point * @return The interpolated value at (x,y) */ - double eval_bicubic(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const; + double eval(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; /** * @brief Evaluates the x-derivative of bicubic interpolation at a specified point. @@ -130,7 +117,7 @@ class Interpolator { * @param y The y-coordinate of the point * @return The x-derivative of the interpolated function at (x,y) */ - double eval_bicubic_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const; + double eval_dx(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; /** * @brief Evaluates the y-derivative of bicubic interpolation at a specified point. @@ -141,7 +128,7 @@ class Interpolator { * @param y The y-coordinate of the point * @return The y-derivative of the interpolated function at (x,y) */ - double eval_bicubic_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const; + double eval_dy(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; /** * @brief Evaluates the bicubic interpolation and its derivatives at a specified point. 
@@ -152,10 +139,10 @@ class Interpolator { * @param y The y-coordinate of the point * @return Data struct containing the interpolated value and its x and y derivatives */ - InterpVals eval_bicubic_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const; + InterpVals eval_and_derivs(const int ss_x, const int ss_y, const double subpx_x, const double subpx_y) const override; }; -#endif //DICINTERPOLATOR_H +#endif //DICINTERPHERMITE_H diff --git a/src/pyvale/dic/cpp/dicmain.cpp b/src/pyvale/dic/cpp/dicmain.cpp index 24f7e06c1..a730a2e64 100644 --- a/src/pyvale/dic/cpp/dicmain.cpp +++ b/src/pyvale/dic/cpp/dicmain.cpp @@ -7,6 +7,7 @@ // STD library Header files #include +#include #include #include #include @@ -25,7 +26,9 @@ #include "../../common_cpp/util.hpp" // DIC Header files -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" +#include "./dicinterpBspline.hpp" +#include "./dicinterpHermite.hpp" #include "./dicoptimizer.hpp" #include "./dicscanmethod.hpp" #include "./dicutil.hpp" @@ -115,6 +118,7 @@ void DICengine(const py::array_t& img_stack_arr, // pointer to hold the reference interpolator (will be created once) Interpolator* interp_ref = nullptr; + Interpolator* interp_ref_inc = nullptr; // loop over deformed images. 
They start at index 1 in the stack for (int img_num = 1; img_num < conf.num_def_img+1; img_num++){ @@ -124,13 +128,15 @@ void DICengine(const py::array_t& img_stack_arr, double *img_def = img_stack + img_num*num_px_in_image; // define our interpolator for the reference image - Interpolator interp_def(img_def, conf.px_hori, conf.px_vert); + Interpolator* interp_def = nullptr; + if (conf.interp_routine == "BSPLINE") interp_def = new Bspline(img_def, conf.px_hori, conf.px_vert); + else if (conf.interp_routine == "HERMITE") interp_def = new Hermite(img_def, conf.px_hori, conf.px_vert); // ------------------------------------------------------------------------------------------------------------------------------------------- // raster scan // ------------------------------------------------------------------------------------------------------------------------------------------- if (conf.scan_method=="IMAGE_SCAN") - scanmethod::image(img_ref, interp_def, ss_grids[0], conf, img_num, result_arrays); + scanmethod::image(img_ref, *interp_def, ss_grids[0], conf, img_num, result_arrays); @@ -139,7 +145,7 @@ void DICengine(const py::array_t& img_stack_arr, // multiwindow FFTCC + reliability Guided // ------------------------------------------------------------------------------------------------------------------------------------------- else if (conf.scan_method=="MULTIWINDOW_RG") - scanmethod::multiwindow_reliability_guided(img_ref, img_def, interp_def, ss_grids, conf, img_num, result_arrays); + scanmethod::multiwindow_reliability_guided(img_ref, img_def, *interp_def, ss_grids, conf, img_num, result_arrays); @@ -148,8 +154,9 @@ void DICengine(const py::array_t& img_stack_arr, // singlewindow FFTCC + reliability Guided // ------------------------------------------------------------------------------------------------------------------------------------------- else if (conf.scan_method=="SINGLEWINDOW_RG"){ - if (!interp_ref) interp_ref = new Interpolator(img_ref, 
conf.px_hori, conf.px_vert); - scanmethod::singlewindow_incremental_reliability_guided(img_ref, img_def, *interp_ref, interp_def, ss_grids, conf, 0, img_num, result_arrays); + if (!interp_ref && conf.interp_routine=="BSPLINE") interp_ref = new Bspline(img_ref, conf.px_hori, conf.px_vert); + if (!interp_ref && conf.interp_routine=="HERMITE") interp_ref = new Hermite(img_ref, conf.px_hori, conf.px_vert); + scanmethod::singlewindow_incremental_reliability_guided(img_ref, img_def, *interp_ref, *interp_def, ss_grids, conf, 0, img_num, result_arrays); } @@ -158,7 +165,7 @@ void DICengine(const py::array_t& img_stack_arr, // multi window FFTCC ONLY // ------------------------------------------------------------------------------------------------------------------------------------------- else if (conf.scan_method=="MULTIWINDOW") - scanmethod::multiwindow(img_ref, img_def, interp_def, ss_grids, conf, img_num, result_arrays); + scanmethod::multiwindow(img_ref, img_def, *interp_def, ss_grids, conf, img_num, result_arrays); @@ -170,8 +177,9 @@ void DICengine(const py::array_t& img_stack_arr, double *img_prev = nullptr; int img_num_prev = img_num-1; img_prev = img_stack + img_num_prev*num_px_in_image; - Interpolator interp_ref_inc(img_prev, conf.px_hori, conf.px_vert); - scanmethod::singlewindow_incremental_reliability_guided(img_prev, img_def, interp_ref_inc, interp_def, ss_grids, conf, img_num_prev, img_num, result_arrays); + if (conf.interp_routine=="BSPLINE") interp_ref_inc = new Bspline(img_prev, conf.px_hori, conf.px_vert); + if (conf.interp_routine=="HERMITE") interp_ref_inc = new Hermite(img_prev, conf.px_hori, conf.px_vert); + scanmethod::singlewindow_incremental_reliability_guided(img_prev, img_def, *interp_ref_inc, *interp_def, ss_grids, conf, img_num_prev, img_num, result_arrays); } @@ -181,6 +189,8 @@ void DICengine(const py::array_t& img_stack_arr, if (!saveconf.at_end) result_arrays.write_to_disk(img_num, saveconf, ss_grids.back(), conf.num_def_img, 
conf.filenames); + if (interp_def) delete interp_def; + if (stop_request) break; } diff --git a/src/pyvale/dic/cpp/dicoptimizer.cpp b/src/pyvale/dic/cpp/dicoptimizer.cpp index b3d6110bd..59a956e47 100644 --- a/src/pyvale/dic/cpp/dicoptimizer.cpp +++ b/src/pyvale/dic/cpp/dicoptimizer.cpp @@ -17,7 +17,7 @@ // Program Header files -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" #include "./dicoptimizer.hpp" #include "./dicshapefunc.hpp" #include "./dicresults.hpp" @@ -158,7 +158,7 @@ namespace optimizer { double def_y = ss_def.y[i]; // get the subset value and derivitives - interp_vals = interp_def.eval_bicubic_and_derivs(global_x, global_y, def_x+global_x, def_y+global_y); + interp_vals = interp_def.eval_and_derivs(global_x, global_y, def_x+global_x, def_y+global_y); ss_def.vals[i] = interp_vals.f; double def = ss_def.vals[i]; @@ -199,7 +199,7 @@ namespace optimizer { opt.costpdp = 0.0; for (int i = 0; i < num_px; ++i) { shapefunc::get_pixel(ss_def.x[i], ss_def.y[i], ss_ref.x[i], ss_ref.y[i], opt.pdp); - ss_def.vals[i] = interp_def.eval_bicubic(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); + ss_def.vals[i] = interp_def.eval(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); opt.costpdp += (ss_ref.vals[i] - ss_def.vals[i]) * (ss_ref.vals[i] - ss_def.vals[i]); } } @@ -240,7 +240,7 @@ namespace optimizer { // apply shape function parameters to deformed subset shapefunc::get_pixel(ss_def.x[i], ss_def.y[i], ss_ref.x[i], ss_ref.y[i], opt.p); - interp_vals = interp_def.eval_bicubic_and_derivs(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); + interp_vals = interp_def.eval_and_derivs(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); ss_def.vals[i] = interp_vals.f; dfdx[i] = interp_vals.dfdx; dfdy[i] = interp_vals.dfdy; @@ -291,7 +291,7 @@ namespace optimizer { sum_squared_def = 0.0; for (int i = 0; i < num_px; ++i) { shapefunc::get_pixel(ss_def.x[i], ss_def.y[i], ss_ref.x[i], ss_ref.y[i], opt.pdp); 
- ss_def.vals[i] = interp_def.eval_bicubic(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); + ss_def.vals[i] = interp_def.eval(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); sum_squared_def += ss_def.vals[i] * ss_def.vals[i]; } @@ -340,7 +340,7 @@ namespace optimizer { // apply shape function parameters to deformed subset shapefunc::get_pixel(ss_def.x[i], ss_def.y[i], ss_ref.x[i], ss_ref.y[i], opt.p); - interp_vals = interp_def.eval_bicubic_and_derivs(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); + interp_vals = interp_def.eval_and_derivs(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); ss_def.vals[i] = interp_vals.f; dfdx[i] = interp_vals.dfdx; dfdy[i] = interp_vals.dfdy; @@ -403,7 +403,7 @@ namespace optimizer { mean_def = 0.0; for (int i = 0; i < num_px; ++i) { shapefunc::get_pixel(ss_def.x[i], ss_def.y[i], ss_ref.x[i], ss_ref.y[i], opt.pdp); - ss_def.vals[i] = interp_def.eval_bicubic(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); + ss_def.vals[i] = interp_def.eval(global_x, global_y, ss_def.x[i]+global_x, ss_def.y[i]+global_y); mean_def += ss_def.vals[i]; } diff --git a/src/pyvale/dic/cpp/dicoptimizer.hpp b/src/pyvale/dic/cpp/dicoptimizer.hpp index 373c05d2b..2908f6e04 100644 --- a/src/pyvale/dic/cpp/dicoptimizer.hpp +++ b/src/pyvale/dic/cpp/dicoptimizer.hpp @@ -13,7 +13,7 @@ // Program Header files #include "./dicresults.hpp" -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" namespace optimizer { diff --git a/src/pyvale/dic/cpp/dicscanmethod.cpp b/src/pyvale/dic/cpp/dicscanmethod.cpp index b6c20b47f..12743f13f 100644 --- a/src/pyvale/dic/cpp/dicscanmethod.cpp +++ b/src/pyvale/dic/cpp/dicscanmethod.cpp @@ -27,7 +27,7 @@ // Program Header files -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" #include "./dicoptimizer.hpp" #include "./dicutil.hpp" #include "./dicrg.hpp" diff --git a/src/pyvale/dic/cpp/dicsubset.cpp b/src/pyvale/dic/cpp/dicsubset.cpp 
index 0b9966f25..828cf40ce 100644 --- a/src/pyvale/dic/cpp/dicsubset.cpp +++ b/src/pyvale/dic/cpp/dicsubset.cpp @@ -66,7 +66,7 @@ namespace subset { ss_def.y[count] = subpx_y+y; // get pixel values - ss_def.vals[count] = interp_def.eval_bicubic(0, 0, ss_def.x[count], ss_def.y[count]); + ss_def.vals[count] = interp_def.eval(0, 0, ss_def.x[count], ss_def.y[count]); // debugging //std::cout << ss_def.x[count] << " " << ss_def.y[count] << " " << ss_def.vals[count] << std::endl; @@ -105,7 +105,7 @@ namespace subset { shapefunc::get_pixel(ss_def.x[count], ss_def.y[count], subpx_x+x, subpx_y+y, p); // get pixel values from interpolator - ss_def.vals[count] = interp_def.eval_bicubic(0, 0, ss_def.x[count], ss_def.y[count]); + ss_def.vals[count] = interp_def.eval(0, 0, ss_def.x[count], ss_def.y[count]); count++; } diff --git a/src/pyvale/dic/cpp/dicsubset.hpp b/src/pyvale/dic/cpp/dicsubset.hpp index 45bd01aad..dc792cc49 100644 --- a/src/pyvale/dic/cpp/dicsubset.hpp +++ b/src/pyvale/dic/cpp/dicsubset.hpp @@ -11,7 +11,7 @@ #include // Program Header files -#include "./dicinterpolator.hpp" +#include "./dicinterp.hpp" namespace subset { diff --git a/src/pyvale/dic/dic2d.py b/src/pyvale/dic/dic2d.py index 8800a869c..02de61c3d 100644 --- a/src/pyvale/dic/dic2d.py +++ b/src/pyvale/dic/dic2d.py @@ -24,7 +24,7 @@ def calculate_2d(reference: np.ndarray | str | Path, subset_step: int = 10, correlation_criteria: str="ZNSSD", shape_function: str="AFFINE", - interpolation_routine: str="BICUBIC", + interpolation_routine: str="BSPLINE", max_iterations: int=40, precision: float=0.001, threshold: float=0.9, @@ -107,7 +107,7 @@ def calculate_2d(reference: np.ndarray | str | Path, output_binary : bool, optional Whether to write output in binary format (default: False). output_prefix : str, optional - Prefix for all output files (default: "dic_results_"). results will be + Prefix for all output files (default: :code:`dic_results_`). results will be named with output_prefix + original filename. 
THe extension will be changed to ".csv" or ".dic2d" depending on whether outputting as a binary. output_delimiter : str, optional diff --git a/src/pyvale/dic/dic2dconv.py b/src/pyvale/dic/dic2dconv.py deleted file mode 100644 index cb15b1118..000000000 --- a/src/pyvale/dic/dic2dconv.py +++ /dev/null @@ -1,6 +0,0 @@ -# ================================================================================ -# pyvale: the python validation engine -# License: MIT -# Copyright (C) 2025 The Computer Aided Validation Team -# ================================================================================ - diff --git a/src/pyvale/dic/dicchecks.py b/src/pyvale/dic/dicchecks.py index 6ad8c0be2..0b402a12f 100644 --- a/src/pyvale/dic/dicchecks.py +++ b/src/pyvale/dic/dicchecks.py @@ -88,13 +88,13 @@ def check_interpolation(interpolation_routine: str) -> None: Validate that the interpolation routine is one of the allowed methods. Checks whether interpolation_routine is a supported - interpolation method. Allowed values are "BILINEAR" and "BICUBIC". If the input + interpolation method. Allowed values are "BSPLINE" and "HERMITE". If the input is not one of these, a `ValueError` is raised. Parameters ---------- interpolation_routine : str - The interpolation method to validate. Must be either "BILINEAR" or "BICUBIC". + The interpolation method to validate. Must be either "BSPLINE" or "HERMITE". 
Raises ------ @@ -103,7 +103,7 @@ def check_interpolation(interpolation_routine: str) -> None: """ - allowed_values = {"BILINEAR", "BICUBIC"} + allowed_values = {"BSPLINE", "HERMITE"} if interpolation_routine not in allowed_values: raise ValueError(f"Invalid interpolation_routine: " diff --git a/src/pyvale/dic/dicdataimport.py b/src/pyvale/dic/dicdataimport.py index fe6ec3efe..5563fc2c2 100644 --- a/src/pyvale/dic/dicdataimport.py +++ b/src/pyvale/dic/dicdataimport.py @@ -368,7 +368,6 @@ def to_grid(data, shape, ss_x_ref, ss_y_ref, x_unique, y_unique): Maps values using reference subset coordinates (ss_x_ref, ss_y_ref). Parameters - ol ---------- data : np.ndarray Array of shape (n_frames, n_points) to be reshaped into (n_frames, height, width). diff --git a/src/pyvale/dic/dicregionofinterest.py b/src/pyvale/dic/dicregionofinterest.py index bd8fb48c7..9382945a5 100644 --- a/src/pyvale/dic/dicregionofinterest.py +++ b/src/pyvale/dic/dicregionofinterest.py @@ -845,7 +845,7 @@ def _get_roi_data(self, roi_element, add: bool): print('getting rect:', roi_element.pos(), roi_element.size()) return { 'type': 'RectROI', - 'pos': [float(roi_element.pos().x()), roi_element.pos().y()], + 'pos': [float(roi_element.pos().x()), float(roi_element.pos().y())], 'size': [float(roi_element.size().x()), float(roi_element.size().y())], 'add': bool(add) } @@ -858,7 +858,8 @@ def _get_roi_data(self, roi_element, add: bool): } elif isinstance(roi_element, pg.PolyLineROI): handle_pos = roi_element.getLocalHandlePositions() - points = [[float(p[1].x()), float(p[1].y())] for p in handle_pos] + roi_pos = roi_element.pos() + points = [[float(p[1].x() + roi_pos.x()), float(p[1].y() + roi_pos.y())] for p in handle_pos] return { 'type': 'PolyLineROI', 'points': points, @@ -893,15 +894,37 @@ def rect_boundary(self, left: int, right: int, top: int, bottom: int) -> None: self.__roi_selected = True def rect_region(self, x: int, y: int, size_x: int, size_y: int ) -> None: + """ + Select a rectangular 
region of interest (ROI) in the reference image. - top = max(0, y) - bottom = min(self.ref_image.shape[0],y+size_y) - left = max(0, x) - right = min(self.ref_image.shape[1],x+size_x) + The rectangle is defined by its top-left corner and size. The region is + automatically clipped to the image bounds. The selected area is marked + in the internal mask and flags the ROI as selected. - # Apply the mask in the subset region - self.mask[top:bottom, left:right] = 255 - self.__roi_selected = True + Parameters + ---------- + x : int + X-coordinate (column index) of the top-left corner of the rectangle. + y : int + Y-coordinate (row index) of the top-left corner of the rectangle. + size_x : int + Width of the rectangular region in pixels. + size_y : int + Height of the rectangular region in pixels. + + Returns + ------- + None + """ + + top = max(0, y) + bottom = min(self.ref_image.shape[0],y+size_y) + left = max(0, x) + right = min(self.ref_image.shape[1],x+size_x) + + # Apply the mask in the subset region + self.mask[top:bottom, left:right] = 255 + self.__roi_selected = True diff --git a/src/pyvale/dic/dicresults.py b/src/pyvale/dic/dicresults.py index 3746b5e63..912423a29 100644 --- a/src/pyvale/dic/dicresults.py +++ b/src/pyvale/dic/dicresults.py @@ -15,44 +15,39 @@ class Results: This dataclass stores the displacements, convergence info, and correlation data associated with a DIC computation. - - Attributes - ---------- - ss_x : np.ndarray - The x-coordinates of the subset centers (in pixels). shape=(img_num,y,x) - ss_y : np.ndarray - The y-coordinates of the subset centers (in pixels). shape=(img_num,y,x) - u : np.ndarray - Horizontal displacements at each subset location. shape=(img_num,y,x) - v : np.ndarray - Vertical displacements at each subset location. shape=(img_num,y,x) - mag : np.ndarray - Displacement magnitude at each subset location, typically computed as sqrt(u^2 + v^2). 
shape=(img_num,y,x) - converged : np.ndarray - boolean value for whether the subset has converged or not. shape=(img_num,y,x) - cost : np.ndarray - Final cost or residual value from the correlation optimization (e.g., ZNSSD). shape=(img_num,y,x) - ftol : np.ndarray - Final `ftol` value from the optimization routine, indicating function tolerance. shape=(img_num,y,x) - xtol : np.ndarray - Final `xtol` value from the optimization routine, indicating solution tolerance. shape=(img_num,y,x) - niter : np.ndarray - Number of iterations taken to converge for each subset point. shape=(img_num,y,x) - shape_params : np.ndarray | None - Optional shape parameters if output during DIC calculation (e.g., affine, rigid). shape=(img_num,y,x) - filenames : list[str] - name of DIC result files that have been found """ ss_x: np.ndarray + """The x-coordinates of the subset centers (in pixels). shape=(img_num,y,x)""" ss_y: np.ndarray + """The y-coordinates of the subset centers (in pixels). shape=(img_num,y,x)""" + u: np.ndarray + """Horizontal displacements at each subset location. shape=(img_num,y,x)""" + v: np.ndarray - mag: np.ndarray - converged: np.ndarray - cost: np.ndarray - ftol: np.ndarray - xtol: np.ndarray - niter: np.ndarray - shape_params: np.ndarray - filenames: list[str] + """Vertical displacements at each subset location. shape=(img_num,y,x)""" + + mag: np.ndarray | None = None + """Displacement magnitude at each subset location, typically computed as sqrt(u^2 + v^2). shape=(img_num,y,x)""" + + converged: np.ndarray | None = None + """boolean value for whether the subset has converged or not. shape=(img_num,y,x)""" + + cost: np.ndarray | None = None + """Final cost or residual value from the correlation optimization (e.g., ZNSSD). shape=(img_num,y,x)""" + + ftol: np.ndarray | None = None + """Final `ftol` value from the optimization routine, indicating function tolerance. 
shape=(img_num,y,x)""" + + xtol: np.ndarray | None = None + """Final `xtol` value from the optimization routine, indicating solution tolerance. shape=(img_num,y,x)""" + + niter: np.ndarray | None = None + """Number of iterations taken to converge for each subset point. shape=(img_num,y,x)""" + + shape_params: np.ndarray | None = None + """Optional shape parameters if output during DIC calculation (e.g., affine, rigid). shape=(img_num,y,x)""" + + filenames: list[str] | None = None + """name of DIC result files that have been found""" \ No newline at end of file diff --git a/src/pyvale/examples/dic/ex2_plate_with_hole.py b/src/pyvale/examples/dic/ex2_plate_with_hole.py index 5d199af01..090690123 100644 --- a/src/pyvale/examples/dic/ex2_plate_with_hole.py +++ b/src/pyvale/examples/dic/ex2_plate_with_hole.py @@ -63,7 +63,7 @@ roi.read_array(filename=roi_file, binary=False) # %% -# Now we can run the 2D DIC engine using :func:`pyvale.dic_2d`. +# Now we can run the 2D DIC engine using :func:`pyvale.dic.calculate_2d`. # # This function accepts many optional arguments — consult the documentation for full details. # At a minimum, you’ll need to specify: @@ -80,7 +80,7 @@ # At present, the DIC engine doesn't return any results to the user, instead the results are saved to disk. # You can customize the filename, location, format, and delimiter using # the options options `output_basepath`, `output_prefix`, `output_delimiter`, and `output_binary`. -# More info on these options can be found in the documentation for :func:`dic.two_dimensional`. +# More info on these options can be found in the API documentation for :func:`pyvale.dic.calculate_2d`. # By default, the results will be saved with the prefix `dic_results_` followed # by the original filename. The file extension will be replaced will either ".csv" or "dic2d" # depending on whether the results are being saved in human-readable or binary format. 
diff --git a/src/pyvale/examples/dic/ex6_hrdic.py b/src/pyvale/examples/dic/ex6_hrdic.py new file mode 100644 index 000000000..a2e4013d7 --- /dev/null +++ b/src/pyvale/examples/dic/ex6_hrdic.py @@ -0,0 +1,125 @@ +#================================================================================ +#Example: high resolution DIC (HRDIC) on large displacement images +# +#pyvale: the python validation engine +#License: MIT +#Copyright (C) 2024 The Computer Aided Validation Team +#================================================================================ +""" +HRDIC +--------------------- + +This example walks through how a user might set up a DIC calculation for large +displacement images, which are often found in micromechanics. +The images used in this example are 10000x10000 pixels so +you'll need to download them from a separate location. +This example assumes the files are in the same +directory/folder as the code. + +**Images can be downloaded** `here `_. +""" + + + +# pyvale modules +import pyvale.dic as dic +import matplotlib.pyplot as plt +import numpy as np + +# %% +# Because of the size of the images, we'll avoid using the interactive GUI for this example. +# We'll create an ROI mask that is the same size as the images using the +# :code:`roi.rect_boundary` command. If you know that pixels along a certain edge will go +# outside the image bounds post deformation, then excluding them from the ROI +# will prevent the DIC engine from trying to correlate subsets in the reference +# image that are not present in the deformed image. +roi = dic.RegionOfInterest(ref_image="ref.tiff") +roi.rect_boundary(left=0,right=0,top=0,bottom=0) + + +# %% +# We'll also choose a seed location. We've picked this at the centre of the image +# because this approximate area will be displaced the least. +roi.seed = [5000,5000] + +# %% +# We can then run the DIC engine using our ROI mask and seed location.
+# + +dic.calculate_2d(reference="ref.tiff", + deformed="def.tiff", + roi_mask=roi.mask, + seed=roi.seed, + subset_size=31, + subset_step=15, + max_displacement=1000, + fft_mad=True, + fft_mad_scale=3.0, + correlation_criteria="ZNSSD", + shape_function="AFFINE", + threshold=0.8, + output_basepath="./") + +# %% +# There's a couple of things to pay attention to here: +# +# * :code:`max_displacement`: Sets the assumed maximum displacement. It is better +# to overestimate rather than underestimate this value +# * :code:`fft_mad` and :code:`fft_mad_scale`: These remove outliers by +# calculating a Mean Absolute Deviation (MAD) at each stage of the FFT windowing +# and prevent the propagation of incorrect rigid body estimates down through the window sizes. +# The mad scale value determines how strict the outlier removal is - larger values +# make the criterion more permissive, whereas smaller values more aggressively suppress deviations. +# * :code:`threshold`: Sets the minimum correlation coefficient value for a +# subset to be considered a good match. Any correlation coefficients less than +# this value will not be present in the results. If you want to have all +# values present in the output, you can set +# :code:`output_below_threshold=True`. +# +# **This might take some time to run depending on the number of available +# threads! You can manually set this with the** :code:`num_threads` **argument.** + +# %% +# The results can be imported in a standard way: +dic_files = "dic_results_*.csv" +dicdata = dic.import_2d(data=dic_files) + +# %% +# And finally a simple plot of the displacements and correlation +# coefficient.
+fig, axes = plt.subplots(1, 3, figsize=(15, 10)) +axes = axes.flatten() + +# First deformation image +im1 = axes[0].pcolor(dicdata.ss_x, dicdata.ss_y, dicdata.u[0]) +im2 = axes[1].pcolor(dicdata.ss_x, dicdata.ss_y, dicdata.v[0]) +im3 = axes[2].pcolor(dicdata.ss_x, dicdata.ss_y, dicdata.cost[0]) + +# Titles +axes[0].set_title('u component') +axes[1].set_title('v component') +axes[2].set_title('cost') + +# Axis ticks +ticks = np.arange(0, 10001, 2500) +for ax in axes: + ax.set_aspect('equal') + ax.set_xlim(0, 10000) + ax.set_ylim(0, 10000) + ax.set_xticks(ticks) + ax.set_yticks(ticks) + + +# Colorbars +fig.colorbar(im1, ax=axes[0], fraction=0.046, pad=0.04) +fig.colorbar(im2, ax=axes[1], fraction=0.046, pad=0.04) +fig.colorbar(im3, ax=axes[2], fraction=0.046, pad=0.04) + +plt.tight_layout() +plt.show() + +# %% +# .. image:: ../../../../_static/hrdic.png +# :alt: Displacement and cost values +# :width: 100% +# :align: center diff --git a/src/pyvale/examples/extsensorsim/ex1_byosimdata.py b/src/pyvale/examples/extsensorsim/ex1_byosimdata.py index 327ce709e..531b497b7 100644 --- a/src/pyvale/examples/extsensorsim/ex1_byosimdata.py +++ b/src/pyvale/examples/extsensorsim/ex1_byosimdata.py @@ -81,7 +81,7 @@ # virtual sensors but we will load it here to demonstrate mesh-based sensors. In # this case each meshed object in the simulation has a connectivity table # labelled "connectX" where X is an integer specifying the unique mesh in the -# simulation. The "hex20_connect1.csv" has the shape 20 by number of elements +# simulation. The "hex20_connect1.csv" has the shape 20 by number of elements # in the mesh as we are using 20 node hexahedral elements. 
# # We can also see the field files which are labelled "hex20_node_field_*" with diff --git a/src/pyvale/mooseherder/simsaver.py b/src/pyvale/mooseherder/simsaver.py index 0f344bd2f..bede30d8a 100644 --- a/src/pyvale/mooseherder/simsaver.py +++ b/src/pyvale/mooseherder/simsaver.py @@ -70,6 +70,7 @@ def save_array(save_file: Path, class ESaveFieldOpt(enum.Enum): """Enumeration specifying how to save physics fields as: + - 'BY_TIME': One array per time step where the first dimension is the nodal coordinates and the second dimension is the field component. - 'BY_FIELD': A single array per nodal field where the first dimension is diff --git a/src/pyvale/sensorsim/camerasensor.py b/src/pyvale/sensorsim/camerasensor.py index a676785f6..8063dcb53 100644 --- a/src/pyvale/sensorsim/camerasensor.py +++ b/src/pyvale/sensorsim/camerasensor.py @@ -9,13 +9,13 @@ """ import numpy as np -from pyavle.sensorsim.field import IField -from pyavle.sensorsim.sensorarray import ISensorArray -from pyavle.sensorsim.errorintegrator import ErrIntegrator -from pyavle.sensorsim.sensordescriptor import SensorDescriptor -from pyavle.sensorsim.fieldsampler import sample_field_with_sensor_data -from pyavle.sensorsim.cameradata2d import CameraData2D -from pyavle.sensorsim.cameratools import CameraTools +from pyvale.sensorsim.field import IField +from pyvale.sensorsim.sensorarray import ISensorArray +from pyvale.sensorsim.errorintegrator import ErrIntegrator +from pyvale.sensorsim.sensordescriptor import SensorDescriptor +from pyvale.sensorsim.fieldsampler import sample_field_with_sensor_data +from pyvale.sensorsim.cameradata2d import CameraData2D +from pyvale.sensorsim.cameratools import CameraTools diff --git a/src/pyvale/sensorsim/fieldconverter.py b/src/pyvale/sensorsim/fieldconverter.py index 801df6645..28ec00845 100644 --- a/src/pyvale/sensorsim/fieldconverter.py +++ b/src/pyvale/sensorsim/fieldconverter.py @@ -137,6 +137,7 @@ def extract_surf_mesh(sim_data: mh.SimData) -> mh.SimData: 
"""Extracts a surface mesh from a 3D simulation dataclass. Useful for limiting the memory required for analysing sensors that only measure surface fields. This function currently supports: + - A single connectivity table - Higher order retrahedral and hexahedral elements (but not wedges or pyramids) diff --git a/src/pyvale/sensorsim/visualsimplotter.py b/src/pyvale/sensorsim/visualsimplotter.py index a7a39b5f7..1754a99c4 100644 --- a/src/pyvale/sensorsim/visualsimplotter.py +++ b/src/pyvale/sensorsim/visualsimplotter.py @@ -11,11 +11,11 @@ import pyvale.mooseherder as mh from pyvale.sensorsim.sensorspoint import SensorsPoint -from pyvale.sensorsim.fieldconverter import simdata_to_pyvista +from pyvale.sensorsim.fieldconverter import simdata_to_pyvista_vis from pyvale.sensorsim.visualopts import (VisOptsSimSensors,VisOptsImageSave) from pyvale.sensorsim.visualtools import (create_pv_plotter, - get_colour_lims, - save_image) + get_colour_lims) +from pyvale.sensorsim.imagetools import ImageTools #TODO: Docstrings @@ -106,7 +106,7 @@ def plot_sim_mesh(sim_data: mh.SimData, if vis_opts is None: vis_opts = VisOptsSimSensors() - pv_simdata = simdata_to_pyvista(sim_data, + pv_simdata = simdata_to_pyvista_vis(sim_data, None, sim_data.num_spat_dims) @@ -129,7 +129,7 @@ def plot_sim_data(sim_data: mh.SimData, if vis_opts is None: vis_opts = VisOptsSimSensors() - pv_simdata = simdata_to_pyvista(sim_data, + pv_simdata = simdata_to_pyvista_vis(sim_data, (component,), sim_data.num_spat_dims) @@ -174,7 +174,7 @@ def plot_point_sensors_on_sim(sensor_array: SensorsPoint, pv_plot.camera_position = vis_opts.camera_position if image_save_opts is not None: - save_image(pv_plot,image_save_opts) + ImageTools.save_image(pv_plot,image_save_opts) return pv_plot diff --git a/src/pyvale/strain/strain.py b/src/pyvale/strain/strain.py index 9c1ae1420..0216476ce 100644 --- a/src/pyvale/strain/strain.py +++ b/src/pyvale/strain/strain.py @@ -10,11 +10,12 @@ from pyvale.strain.strainresults import 
StrainResults from pyvale.strain.strainchecks import check_strain_files from pyvale.dic.dicdataimport import import_2d +from pyvale.dic.dicresults import Results as dicResults from pyvale.common_py.util import check_output_directory import pyvale.strain.strain_cpp as strain_cpp import pyvale.common_cpp.common_cpp as common_cpp -def calculate_2d(data: str | Path, +def calculate_2d(data: dicResults | str | Path, window_size: int=5, window_element: int=9, input_binary: bool=False, @@ -33,15 +34,18 @@ def calculate_2d(data: str | Path, Parameters ---------- - data : pathlib.Path or str - A pathlib.Path or str to files from which the data should be imported. + data : dic.Results, pathlib.Path or str + input data can either be a dic.Results object or pathlib.Path / str if importing data + straight from a file input_delimiter: str - delimiter used for the input dic results files (default: ","). + delimiter used for the input dic results files if using + pathlib.Path or str for data import (default: ","). input_binary bool: - whether input data is in human-readable or binary format (default: - False). + whether input data is in human-readable or binary format if using + pathlib.Path or str for data import (default: False). window_size : int, optional - The size of the local window over which to compute strain (must be odd), by default 5. + The size of the local window over which to compute strain (must be odd, + default: 5). window_element : int, optional The type of finite element shape function used in the strain window: 4 (bilinear) or 9 (biquadratic), by default 4. @@ -53,7 +57,7 @@ def calculate_2d(data: str | Path, output_binary : bool, optional Whether to write output in binary format (default: False). output_prefix : str, optional - Prefix for all output files (default: "strain_"). results will be + Prefix for all output files (default: :code:`strain_`). results will be named with output_prefix + original filename. 
THe extension will be changed to ".csv" or ".dic2d" depending on whether outputting as a binary. output_delimiter : str, optional @@ -79,16 +83,35 @@ def calculate_2d(data: str | Path, if window_size % 2 == 0: raise ValueError(f"Invalid strain window size: '{window_size}'. Must be an odd number.") - filenames = check_strain_files(strain_files=data) - # Load data if a file path is given - results = import_2d(layout="matrix", data=str(data), - binary=input_binary, delimiter=input_delimiter) + if isinstance(data, (str, Path)): + filenames = check_strain_files(strain_files=data) + + # Load data if a file path is given + dicresults = import_2d(layout="matrix", data=str(data), + binary=input_binary, delimiter=input_delimiter) + + elif isinstance(data, dicResults): + dicresults = data + print(dicresults.ss_x.shape, dicresults.ss_y.shape, dicresults.u.shape,dicresults.v.shape) + assert dicresults.ss_x.ndim == 2 and dicresults.ss_y.ndim == 2, "ss_x and ss_y must be 2D" + assert dicresults.ss_x.shape == dicresults.ss_y.shape, "ss_x and ss_y must have the same shape" + assert dicresults.u.ndim == 3 and dicresults.v.ndim == 3, "u and v must be 3D" + assert dicresults.u.shape == dicresults.v.shape, "u and v must have the same shape" + assert dicresults.u.shape[1:] == dicresults.ss_x.shape, "Spatial dimensions of u must match ss_x" + + # need to make dummy filenames + filenames = [] + for f in range(0,dicresults.u.shape[0]): + filenames.append(f"strain_data_{f:04d}") + + else: + raise TypeError(f"Unexpected displacement data type: {type(data)}") # Extract dimensions from the validated object - nss_x = results.ss_x.shape[1] - nss_y = results.ss_y.shape[0] - nimg = results.u.shape[0] + nss_x = dicresults.ss_x.shape[1] + nss_y = dicresults.ss_x.shape[0] + nimg = dicresults.u.shape[0] check_output_directory(str(output_basepath), output_prefix, 0) @@ -101,11 +124,11 @@ def calculate_2d(data: str | Path, strain_save_conf.delimiter = output_delimiter strain_save_conf.at_end = 
output_at_end - print(filenames) + print(type(filenames)) # Call to C++ backend - strain_cpp.strain_engine(results.ss_x, results.ss_y, - results.u, results.v, + strain_cpp.strain_engine(dicresults.ss_x, dicresults.ss_y, + dicresults.u, dicresults.v, nss_x, nss_y, nimg, window_size, window_element, strain_formulation, filenames, diff --git a/src/pyvale/strain/strainresults.py b/src/pyvale/strain/strainresults.py index d2b445e01..e9993b44c 100644 --- a/src/pyvale/strain/strainresults.py +++ b/src/pyvale/strain/strainresults.py @@ -11,13 +11,12 @@ @dataclass(slots=True) class StrainResults: """ - Data container for Strain analysis results. - - This dataclass stores the strain window coordinates, 2D deformation gradient - and strain values. + Data container for Strain analysis results. This dataclass stores the strain + window coordinates, 2D deformation gradient and strain values. Attributes ---------- + window_x : np.ndarray The x-coordinates of the strain window centre. shape=(img_num,y,x) window_y : np.ndarray @@ -32,11 +31,11 @@ class StrainResults: The yy component of the 2D deformation gradient. shape=(img_num,y,x) eps_xx : np.ndarray The xx component of the 2D strain tensor. shape=(img_num,y,x) - eps_xy : np.ndarray + eps_xy : np.ndarray The xy component of the 2D strain tensor. shape=(img_num,y,x) - eps_yx : np.ndarray + eps_yx : np.ndarray The yx component of the 2D strain tensor. shape=(img_num,y,x) - eps_yy : np.ndarray + eps_yy : np.ndarray The yy component of the 2D strain tensor. 
shape=(img_num,y,x) filenames : list[str] name of Strain result files that have been found diff --git a/src/pyvale/verif/pointsensmultiphys.py b/src/pyvale/verif/pointsensmultiphys.py index 2f3bc1a6a..aea685ec6 100644 --- a/src/pyvale/verif/pointsensmultiphys.py +++ b/src/pyvale/verif/pointsensmultiphys.py @@ -166,7 +166,7 @@ def exp_sim_2d() -> dict[str,sens.ExperimentSimulator]: # print(f"{ee=}") # print(80*"-") if err_chain_dict[ff][ee] is not None: - err_int_opts = sens.ErrIntOpts()q + err_int_opts = sens.ErrIntOpts() this_sens.set_error_chain(err_chain_dict[ff][ee], err_int_opts) diff --git a/tests/dic/test_dic.py b/tests/dic/test_dic.py index 48c837c75..698a300ac 100644 --- a/tests/dic/test_dic.py +++ b/tests/dic/test_dic.py @@ -130,7 +130,6 @@ def test_image_scan_znssd_affine(): subset_step=15, max_displacement=2, correlation_criteria="ZNSSD", - interpolation_routine="BICUBIC", shape_function="AFFINE", method="IMAGE_SCAN", output_basepath=test_dir, @@ -159,7 +158,6 @@ def test_image_scan_znssd_rigid(): subset_step=15, max_displacement=2, correlation_criteria="ZNSSD", - interpolation_routine="BICUBIC", shape_function="RIGID", method="IMAGE_SCAN", output_basepath=test_dir, @@ -188,7 +186,6 @@ def test_image_scan_nssd_affine(): subset_step=15, max_displacement=2, correlation_criteria="NSSD", - interpolation_routine="BICUBIC", shape_function="AFFINE", method="IMAGE_SCAN", output_basepath=test_dir, @@ -217,7 +214,6 @@ def test_rg_znssd_affine(): subset_step=15, max_displacement=2, correlation_criteria="ZNSSD", - interpolation_routine="BICUBIC", shape_function="AFFINE", method="MULTIWINDOW_RG", output_basepath=test_dir, @@ -246,7 +242,6 @@ def test_fft_large(): subset_step=15, max_displacement=100, correlation_criteria="ZNSSD", - interpolation_routine="BICUBIC", shape_function="RIGID", method="MULTIWINDOW", output_basepath=test_dir, diff --git a/tests/strain/test_strain.py b/tests/strain/test_strain.py new file mode 100644 index 000000000..ba7bb243e --- /dev/null 
+++ b/tests/strain/test_strain.py @@ -0,0 +1,119 @@ + + +import numpy as np +import pytest +import scipy.linalg +import os +import glob + +#pyvale stuff +import pyvale.dic as dic +import pyvale.strain as strain + + +def generate_affine_displacement_grid(F, nx=100, ny=100): + """Generate displacement field for a uniform deformation gradient F.""" + x = np.linspace(0, 990, nx) + y = np.linspace(0, 990, ny) + X, Y = np.meshgrid(x, y, indexing="ij") + + Ux = np.zeros((1, nx, ny)) + Uy = np.zeros((1, nx, ny)) + + # create displacement field + for i in range(nx): + for j in range(ny): + pos = np.array([X[i,j], Y[i,j]]) + disp = (F - np.eye(2)) @ pos + Ux[0, i, j] = disp[0] + Uy[0, i, j] = disp[1] + + return X, Y, Ux, Uy + + +def reference_strain(F, formulation): + I = np.eye(2) + C = F.T @ F + B = F @ F.T + U = scipy.linalg.sqrtm(C) + V = scipy.linalg.sqrtm(B) + + if formulation == "GREEN": + return 0.5 * (C - I) + if formulation == "ALMANSI": + return 0.5 * (I - np.linalg.inv(B)) + if formulation == "BIOT_LAGRANGE": + return U - I + if formulation == "BIOT_EULER": + return V - I + if formulation == "HENCKY": + return scipy.linalg.logm(U) + + raise ValueError(formulation) + + +# Strain formulations I've got implemented currently +@pytest.mark.parametrize("strain_formulation", [ + "GREEN", + "ALMANSI", + "BIOT_LAGRANGE", + "BIOT_EULER", + "HENCKY", +]) + +# list of deformation types to check against. 
+@pytest.mark.parametrize("deformation_type,F", [ + ("uniform_x_stretch", np.array([[1.02, 0], [0, 1.0]])), + ("uniform_y_stretch", np.array([[1.0, 0], [0, 1.03]])), + ("shear_x", np.array([[1.0, 0.05], [0.0, 1.0]])), + ("shear_y", np.array([[1.0, 0.0], [0.05, 1.0]])), + ("stretch_and_shear", np.array([[1.02, 0.03], [0.0, 1.01]])), + ("rotation", np.array([[np.cos(np.pi/12), -np.sin(np.pi/12)], + [np.sin(np.pi/12), np.cos(np.pi/12)]])), +]) + +def test_strain_deformations(strain_formulation, deformation_type, F): + # Generate displacement field + X, Y, Ux, Uy = generate_affine_displacement_grid(F) + + input_data = dic.Results(X, Y, Ux, Uy) + + # Compute strain + strain_results = strain.calculate_2d( + data=input_data, + window_size=5, + window_element=4, + strain_formulation=strain_formulation, + output_prefix=f"strain_{strain_formulation}_{deformation_type}_" + ) + + # Analytic reference strain + expected = reference_strain(F, strain_formulation) + + strainresults = strain.import_2d(f"./strain_{strain_formulation}_{deformation_type}_*.csv") + + # Map of deformation gradient and strain components + checks = { + "def_xx": F[0,0], + "def_xy": F[0,1], + "def_yx": F[1,0], + "def_yy": F[1,1], + "eps_xx": expected[0,0], + "eps_xy": expected[0,1], + "eps_yx": expected[1,0], + "eps_yy": expected[1,1], + } + + # Loop through and assert all + for attr, val in checks.items(): + np.testing.assert_allclose( + getattr(strainresults, attr), + val, + rtol=1e-5, + atol=1e-7, + err_msg=f"{attr} incorrect for {strain_formulation} / {deformation_type}" + ) + + for file_path in glob.glob(f"./strain_{strain_formulation}_{deformation_type}_*.csv"): + os.remove(file_path) +