diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json index 66b08a8..4724d1e 100644 --- a/.vscode/c_cpp_properties.json +++ b/.vscode/c_cpp_properties.json @@ -6,7 +6,8 @@ "${workspaceFolder}/**", "${workspaceFolder}/src/vstarstack/library/fine_movement/libimagedeform/include", "/usr/include/python3.11", - "/usr/include/python3.13" + "/usr/include/python3.13", + "/home/vlad/Astronomy/software/vstarstack/lib/python3.13/site-packages/numpy/_core/include" ], "defines": [], "compilerPath": "/usr/bin/gcc", diff --git a/.vscode/settings.json b/.vscode/settings.json index e5f59e6..68d4de7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -43,7 +43,8 @@ "span": "cpp", "stdexcept": "cpp", "typeinfo": "cpp", - "imagegrid.h": "c" + "map": "c", + "format": "c" }, "editor.tabSize": 4, "editor.insertSpaces": true, diff --git a/doc/deepsky.tex b/doc/deepsky.tex new file mode 100644 index 0000000..92c5fcd --- /dev/null +++ b/doc/deepsky.tex @@ -0,0 +1,303 @@ +\documentclass{article} +\usepackage{mathtools} + +\begin{document} + +\section{Task} + +We have frames $F(x,y)$: + +\begin{eqnarray} + F(x,y) \sim Poisson\left( \lambda(x,y) \right) \\ + \lambda(x,y) = f_{dark}(x,y) + v(x,y) \cdot \left( f_{sky}(x,y) + f_{true}(x,y) \right) +\end{eqnarray} + +Where +\begin{itemize} + \item $f_{dark}(x,y)$ --- dark signal + \item $f_{sky}(x,y)$ --- sky signal (light pollution) + \item $f_{true}(x,y)$ --- true signal + \item $v(x,y)$ --- flat relative calibration function (vignetting, particles on sensor, etc) +\end{itemize} + +What does "relative" means? We have telescope lens or mirror, sensor with some QE, exposure, sensor gain. We have Poisson--distributed amount +of photons and dark electrons on sensor, which is multiplied by x,y--independent factor of QE, lens size, exposure, and x,y--dependent factor +of vignetting, particles on sensor, etc. We want to compensate x,y--dependent factors. 
So we can assume $max(v) = 1$, because it shows +relative coefficient to x,y--maximal value. If $max(v)$ would be, for example, $0.5$, it would be same as twice less QE with $max(v) = 1$. +But we know that it still will be Poisson distribution. + +We want to estimate true value of signal: $f_{true}(x,y)$. + +\section{Calibrations} + +\subsection{Dark signal} + +We can find $f_{dark}(x,y)$ by taking enough dark frames $D_k(x,y)$ and find mean of them. +In later research we assume that we know $f_{dark}(x,y)$ with absolute precision. + +\begin{equation} + D_k(x,y) \sim Poisson(f_{dark}(x,y)) +\end{equation} + +So we find + +\begin{equation} + f_{dark}(x,y) = \frac{\sum_{k=1}^K D_k(x,y)}{K} +\end{equation} + +\subsection{Flats} + +We need to calculate $v(x,y)$. We can either use flatbox or capture enough images of sky in different positions. +Building flat is not a subject of current article. In later we assume that we know $v(x,y)$ with absolute precision. + +\section{Signal obtaining} + +As input we have: +\begin{itemize} + \item $d(x,y)$ --- constant signal (dark or dark + sky, depending on case) + \item $v(x,y)$ --- normalized flats function + \item $F_j(x,y)$ --- captured frames +\end{itemize} + +We know that $F_i$ have Poisson distribution: +\begin{eqnarray} + F_i(x,y) = Poisson\left( d(x,y) + v(x,y) \cdot f(x,y) \right) +\end{eqnarray} + +How $d$ is related with $f_{dark}$, $f_{sky}$? It depends on approach. +In most basic approach we do 2--step calculation: +\begin{itemize} + \item Capture dark frames + \item Capture flat frames + \item Capture target frames + \item Considering $d = f_{dark}$ find image signal $f_{full}$. 
It is sum of $f_{sky}$ and $f$ + \item Analyze $f_{full}$ and find $f_{sky}$ from it + \item Considering $d = f_{dark} + v \cdot f_{sky}$ find target image signal $f$ +\end{itemize} + +In case of high light pollution we even don't need Bayesian calculation to obtain $f_{sky}$ + +\subsection{First approach} + +We act the same way as for flats calculation: +\begin{eqnarray} + \left\langle F_i(x,y) \right\rangle = d(x,y) + v(x,y) \cdot f(x,y) \\ + f(x,y) = \frac{\left\langle F_i(x,y) \right\rangle - d(x,y)}{v(x,y)} = \frac{\left\langle F_i(x,y) - d(x,y) \right\rangle}{v(x,y)} +\end{eqnarray} + +Here we use Sigma-Clipping: + +\begin{eqnarray} + f(x,y) = \frac{SC(\left\{ F_i(x,y) - d(x,y)\right\})}{v(x,y)} +\end{eqnarray} + +This implements the most basic approach: subtract darks, then apply flats, then do Sigma-Clipping. +This method is simple, but can generate negative values, especially for low SNR case. + +\subsection{Bayesian analysis} + +We have set of frames $F_i(x,y)$. We want to find estimation for $f(x,y)$: + +\begin{eqnarray} + E\left[ f(x,y) \right] = \int df(x,y) \cdot f(x,y) \cdot p(f(x,y) | \left\{ F_i(x,y) \right\}) +\end{eqnarray} + +Let's find $p(f(x,y) | \left\{ F_i(x,y) \right\})$. We process independently for each $x,y$, so they are just parameters, +not integration variables. So we won't write them for readability: + +\begin{eqnarray} + E\left[ f \right] = \int df \cdot f \cdot p(f | \left\{ F_i \right\}) +\end{eqnarray} + +According to Bayes' theorem + +\begin{eqnarray} + p(f | \left\{ F_i \right\}) = \frac{p(\left\{ F_i \right\} | f) \cdot p(f)}{p(\left\{ F_i \right\})} = + \frac{p(\left\{ F_i \right\} | f) \cdot p(f)}{\int p(\left\{ F_i \right\} | f') \cdot p(f') \cdot df'} \label{eq:bayes} +\end{eqnarray} + +where $p(f)$ --- apriori probability of $f$. We should estimate it before starting Bayesian analysis. 
+ +It's very important that +\begin{equation} + p(\left\{ F_i \right\}) \ne \prod_i p(F_i) +\end{equation} + +It happens because $F_i$ are not marginally independent: if $P(AB|C_i) = P(A|C_i) \cdot P(B|C_i)$ for each $i$, it doesn't mean that $P(AB) = P(A)\cdot P(B)$ + +But $F_i|f$ are conditionally independent, so we can write +\begin{equation} + p(\left\{ F_i \right\} | f) = \prod_i p(F_i | f) +\end{equation} + +So we can continue (\ref{eq:bayes}) and write it as +\begin{eqnarray} + p(f | \left\{ F_i \right\}) = \frac{\prod_i p(F_i|f) \cdot p(f)}{\int \prod_i p(F_i|f') \cdot p(f') df'} \label{eq:bayes2} +\end{eqnarray} + +Let's remember what $p(F_i | f)$ is: +\begin{equation} +\begin{array}{r@{}l} + p(F_i | f) = \frac{\lambda^{F_i} \exp(-\lambda)} {F_i!} \\ + \lambda = d(x,y) + v(x,y) \cdot f(x,y) +\end{array} +\end{equation} + +Let's substitute it into (\ref{eq:bayes2}): +\begin{equation} +\begin{array}{r@{}l} + p(f | \left\{ F_i \right\}) = \frac{ \prod_i \frac{\lambda(f)^{F_i} \cdot \exp(-\lambda(f))}{F_i!} \cdot p(f) } + {\int \prod_i \frac{\lambda(f')^{F_i} \cdot \exp(-\lambda(f'))}{F_i!} p(f') df'} +\end{array} +\end{equation} + +After simplifying it we have: +\begin{equation} + p(f | \left\{ F_i \right\}) = \frac{\lambda(f)^{\sum F_i} \cdot \exp(-N\lambda(f)) \cdot p(f)} + {\int \lambda(f')^{\sum F_i} \cdot \exp(-N\lambda(f')) \cdot p(f') df'} +\end{equation} + +where $N$ --- amount of $F_i$. Factorials are gone. And for case where $p(f) \ne 0$, $\lambda(f) \ne 0$ we can simplify: + +\begin{equation} + p(f | \left\{ F_i \right\}) = \left( \int \left( \frac{\lambda(f')}{\lambda(f)} \right)^{\sum F_i} \cdot + \exp\left\{-N \cdot (\lambda(f')-\lambda(f)) \right\} \cdot \left( \frac{p(f')}{p(f)} \right) df'\right)^{-1} +\end{equation} + +But there can be some practical case: what if different frames have different $f_{dark}$ or $f_{sky}$ or $\nu$? 
In that case we have series +of $\lambda_i(f)$ and formula slightly changes: +\begin{equation} + p(f | \left\{ F_i \right\}) = \left( \int \prod_i \left( \frac{\lambda_i(f')}{\lambda_i(f)} \right)^{F_i} \cdot + \exp\left\{-\sum_i \left(\lambda_i(f')-\lambda_i(f)\right)\right\} \cdot \left( \frac{p(f')}{p(f)} \right) df'\right)^{-1} +\end{equation} + +In this formula we have 2 components $\prod_i \left( \frac{\lambda_i(f')}{\lambda_i(f)} \right)^{F_i}$ and +$\exp\left\{-\sum_i \left(\lambda_i(f')-\lambda_i(f)\right) \right\}$. For big $f'$ first would be big, but it will be compensated by exponent. +But it can cause overflow. So we can write: + +\begin{eqnarray} + p(f | \left\{ F_i \right\}) = \left( \int \exp\left\{\sum_i \left( F_i \ln \lambda_i(f') - \lambda_i(f')\right) + - \left( F_i \ln \lambda_i(f) - \lambda_i(f)\right) \right\} \cdot \frac{p(f')}{p(f)} df' \right)^{-1} \label{eq:bayes3} +\end{eqnarray} + +Or, introducing $\Lambda_i$: + +\begin{eqnarray} + \Lambda_i(f) = F_i \ln \lambda_i(f) - \lambda_i(f) \\[9pt] + p(f | \left\{ F_i \right\}) = \left( \int \exp\left\{\sum_i \Lambda_i(f') - \Lambda_i(f) \right\}\cdot \frac{p(f')}{p(f)} df' \right)^{-1} +\end{eqnarray} + +Also we can use that formula in case when $f$ is a vector. But we have to adjust $\lambda(f)$ in that case: + +\begin{eqnarray} + \lambda_i (f) = d_{i} + v_{i} \cdot \sum_{\beta} K_{i, \beta} \cdot f_{\beta} +\end{eqnarray} + +Here: +\begin{itemize} +\item $i$ --- index for frames +\item $\beta$ --- index for $f$ dimensions +\item $K$ --- matrix for contribution of each component into common signal +\end{itemize} + +Challenges with such approach: +\begin{itemize} +\item we need some a-priori knowledge of probability $p(f)$ +\item integral calculation over $R_{+}$ is pretty long procedure. So we can calculate it only in some area around maxima of $p(f')$ +\end{itemize} + +Good point is that for some priors, integral can be taken analytically. 
+ +\subsection{Pipeline} +So the pipeline is following: + +\begin{itemize} + \item Perform a "naive" estimation to obtain initial $f$ + \item Using a sliding window, build local histograms of $f$ to estimate the local prior $p(f)$. Normalize and optionally smooth these histograms. + \item Use the Bayesian estimator for $E\left[ f \right]$ to refine $f$, based on the current prior + \item Iterate the prior estimation and refinement steps until convergence or acceptable error level +\end{itemize} + +\section{True signal obtain} + +During previous step we studied how to obtain signal $f$. But it contains sky light pollution: +\begin{eqnarray} + f = f_{sky} + f_{signal} \\ + \lambda(f_{signal}) = f_{dark} + \nu \left( f_{sky} + f_{signal} \right) +\end{eqnarray} + +So, we need to estimate $f_{sky}$. + +\subsection{Sky estimation} +Firstly, let's using formula $\lambda(f) = f_{dark} + \nu f$ find whole captured signal $f$. + +We can estimate $f_{sky}$ from $f$ using regions on the side of image, where target signal is expected to be absent. +But we should be careful, not to overestimate. + +\subsection{Bayesian analysis} + +So, now we use $\lambda(f_{signal}) = f_{dark} + \nu \left( f_{sky} + f_{signal} \right)$ to find $f_{signal}$. + +\subsection{Pipeline} +So the pipeline is following: + +\begin{itemize} + \item obtain whole signal $f(x,y)$ + \item find $f_{sky}$ from $f(x,y)$ + \item find expected $f_{signal}$ +\end{itemize} + +\section{Narrowband signal extraction} + +In this case we have 2 signal images, captured with narrow (1) and wide (2) filters. Whole signal has 2 components: $f_{c,*}$ --- continuum, +and $f_{n,*}$ --- narrow signal: + +\begin{eqnarray} + f_{signal, 1} = f_{c, 1} + f_{n} \\ + f_{signal, 2} = f_{c, 2} + f_{n} +\end{eqnarray} + +And we assume that continuum for both cases is proportional. We can assume that if wide filter not too wide. +\begin{equation} + f_{c,2} = k \cdot f_{c,1} = k \cdot f_c +\end{equation}, where $k > 1$. 
Value $k$ we obtain from calibration by stars --- they emit only continuum. + +So +\begin{eqnarray} + f_{signal, 1} = f_c + f_n \\ + f_{signal, 2} = k \cdot f_c + f_n +\end{eqnarray} + +So we can find $f_n$ as +\begin{eqnarray} + (1-1/k) \cdot f_n = f_{signal, 1} - \frac{f_{signal, 2}}{k} \\ + f_n = \frac{k \cdot f_{signal, 1} - f_{signal, 2}}{k-1} \\ + f_c = f_{signal,1} - f_n = \frac{f_{signal, 2} - f_{signal, 1}}{k-1} +\end{eqnarray} + +But as in previous parts, it's also a first approach. + +\subsection{Bayesian analysis} + +\begin{eqnarray} + F_{i,1} = Poisson\left( \lambda_{i,1}\left({f_n \atop f_c}\right) \right) \\ + F_{j,2} = Poisson\left( \lambda_{j,2}\left({f_n \atop f_c}\right) \right) +\end{eqnarray} + +\begin{eqnarray} + \lambda_{i,1}\left({f_n \atop f_c}\right) = f_{dark, 1, i} + \nu_{1,i} \cdot \left( f_{sky, 1, i} + f_c + f_n \right) \\ + \lambda_{j,2}\left({f_n \atop f_c}\right) = f_{dark, 2, j} + \nu_{2,j} \cdot \left( f_{sky, 2, j} + k \cdot f_c + f_n \right) +\end{eqnarray} + +We can concatenate $\left\{F_{i,1}, F_{j,2}\right\}$ frames to single frames list and +$\left\{ \lambda_{i,1}, \lambda_{j,2} \right\}$ to single $\lambda$ list, and like in (\ref{eq:bayes3}) we get: + +\begin{eqnarray} +p\left({f_n \atop f_c} | \left\{ F_{i,1}, F_{j,2} \right\}\right) = +\left( \int e^{ + -\sum_i \left[\Lambda_{i,1}\left({f'_n \atop f'_c}\right) - \Lambda_{i,1}\left({f_n \atop f_c}\right) \right] + -\sum_j \left[\Lambda_{j,2}\left({f'_n \atop f'_c}\right) - \Lambda_{j,2}\left({f_n \atop f_c}\right) \right]} +\cdot \frac{p\left({f'_n \atop f'_c}\right)}{p\left({f_n \atop f_c}\right)} df'_n df'_c \right)^{-1} +\end{eqnarray} + +\end{document} diff --git a/pyproject.toml b/pyproject.toml index eece430..300ac8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "py_build_cmake.build" [project] name = "vstarstack" -version = "0.3.7.11" +version = "0.3.8" requires-python = 
">=3.10" authors = [{name="Vladislav Tsendrovskii", email="vtcendrovskii@gmail.com"}] description = 'Stacking astrophotos' @@ -36,10 +40,11 @@ directory = "src/" name = "vstarstack" [tool.py-build-cmake.cmake] +build_type = "RelWithDebInfo" source_path = "src/c_modules" install_components = [ "python_modules" ] [tool.py-build-cmake.sdist] -include = ["src/c_modules/*"] \ No newline at end of file +include = ["src/c_modules/*"] diff --git a/src/c_modules/CMakeLists.txt b/src/c_modules/CMakeLists.txt index f76308c..cec234f 100644 --- a/src/c_modules/CMakeLists.txt +++ b/src/c_modules/CMakeLists.txt @@ -1,6 +1,11 @@ cmake_minimum_required(VERSION 3.10) project(vstarstack) +if (BUILD_TESTS) + enable_testing() + include(CTest) +endif() + find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module) message("Python: '${Python3_EXECUTABLE}'") @@ -12,10 +17,10 @@ if(NUMPY_NOT_FOUND) message(FATAL_ERROR "NumPy headers not found: ${NUMPY_NOT_FOUND}") endif() - message("Numpy headers: '${NUMPY_INCLUDE_DIR}'") add_subdirectory(projections) add_subdirectory(movements) add_subdirectory(fine_movement) add_subdirectory(clusterization) +add_subdirectory(bayes) diff --git a/src/c_modules/bayes/CMakeLists.txt b/src/c_modules/bayes/CMakeLists.txt new file mode 100644 index 0000000..9964ca8 --- /dev/null +++ b/src/c_modules/bayes/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.10) + +add_subdirectory(libbayes) + +Python3_add_library(bayes_py MODULE module.c WITH_SOABI) + +target_include_directories(bayes_py PUBLIC lib "${NUMPY_INCLUDE_DIR}") +target_link_libraries(bayes_py PUBLIC bayes) +set_target_properties(bayes_py PROPERTIES OUTPUT_NAME "bayes") + +install(TARGETS bayes_py + COMPONENT python_modules + DESTINATION ${PY_BUILD_CMAKE_IMPORT_NAME}/library/bayes) diff --git a/src/c_modules/bayes/libbayes/CMakeLists.txt b/src/c_modules/bayes/libbayes/CMakeLists.txt new file mode 100644 index 0000000..068a820 --- /dev/null +++ 
b/src/c_modules/bayes/libbayes/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.10) +project(bayes) + +add_library(bayes STATIC bayes.c) +target_include_directories(bayes INTERFACE .) + +if (BUILD_TESTS) + enable_testing() + include(CTest) + add_subdirectory(tests) +endif() diff --git a/src/c_modules/bayes/libbayes/bayes.c b/src/c_modules/bayes/libbayes/bayes.c new file mode 100644 index 0000000..4a657c5 --- /dev/null +++ b/src/c_modules/bayes/libbayes/bayes.c @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2025 Vladislav Tsendrovskii + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 3 of the License. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "bayes.h" +#include +#include +#include + +static void init_index(int num_dim, int *index) +{ + memset(index, 0, sizeof(int) * num_dim); +} + +/** + * @brief go to next integration or iteration position + */ +static bool next_index(int num_dim, const int *index_max, int *index) +{ + int i; + index[0]++; + for (i = 0; i < num_dim - 1; i++) + { + if (index[i] >= index_max[i]) + { + index[i] = 0; + index[i + 1]++; + } + else + { + return true; + } + } + if (index[num_dim - 1] >= index_max[num_dim - 1]) + return false; + return true; +} + +/** + * @brief convert index in interation space to {f_i} + */ +static void index_2_f(int num_dim, const int *index, const double *low, double dl, double *f) +{ + int i; + for (i = 0; i < num_dim; i++) + f[i] = low[i] + dl * index[i]; +} + +/** + * @brief Calculate Sum of all Lambda({f_i}) over all frames (see formulas 21, 22) + */ +static double SumLambda(int N, + int num_frames, + const uint64_t *F, + const double *f, + const double *d, + const double *v, + const double *K) +{ + double sum = 0; + int i, k; + for (i = 0; i < num_frames; i++) + { + double item = d[i]; + for (k = 0; k < N; k++) + item += v[i] * K[i * N + k] * f[k]; + sum += F[i] * log(item) - item; + } + return sum; +} + +/** + * @brief Calculate posterior probability item which to be integrated (see formula 22) + */ +static double posterior_item(int N, + int num_frames, + const uint64_t *F, + const double *f, + const double *f_integration, + const double *d, + const double *v, + const double *K) +{ + double Lambdas_f_posterior = SumLambda(N, num_frames, F, f, d, v, K); + double Lambdas_f_integration = SumLambda(N, num_frames, F, f_integration, d, v, K); + return exp(Lambdas_f_integration - Lambdas_f_posterior); +} + +/** + * @brief Calculate posterior probability p({f_i}|{F_a}) (see formula 22) + */ +static double _bayes_posterior(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *f, + const double *d, + const 
double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const int *index_max, + double dl) +{ + double apriori_f = apriori(f, ctx->N, apriori_params); + if (apriori_f > -1e-12 && apriori_f < 1e-12) + return 0; + double s = 0; + double dln = pow(dl, ctx->N); + init_index(ctx->N, ctx->index_integration); + do + { + index_2_f(ctx->N, ctx->index_integration, limits_low, dl, ctx->f_integration); + double apriori_f_integration = apriori(ctx->f_integration, ctx->N, apriori_params); + if (apriori_f_integration > -1e-12 && apriori_f_integration < 1e-12) + continue; + double item = posterior_item(ctx->N, num_frames, F, f, ctx->f_integration, d, v, K) * apriori_f_integration; + s += item * dln; + } while (next_index(ctx->N, index_max, ctx->index_integration)); + if (s < 1e-14) { + // We are inegrating over wrong area where all probabilies are too low + return 0; + } + double posterior = apriori_f / s; + if (posterior > 1) + return 1; + if (posterior < 0) + return 0; + return posterior; +} + +/** + * @brief find maximal value for each component of iteration index + */ +static void bayes_index_max(int N, + const double *limits_low, + const double *limits_high, + double dl, + int *index_max) +{ + int i; + for (i = 0; i < N; i++) { + index_max[i] = ceil((limits_high[i] - limits_low[i]) / dl); + } +} + +/** + * @brief Calculate posterior probability p({f_i}|{F_a}) (see formula 22) + */ +double bayes_posterior(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *f, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl) +{ + bayes_index_max(ctx->N, limits_low, limits_high, dl, ctx->index_max); + return _bayes_posterior(ctx, num_frames, F, f, d, v, K, apriori, apriori_params, limits_low, ctx->index_max, dl); +} + +/** + * @brief Find such {f_i} where p({f_i} | 
{F_a}) is maximal + */ +void bayes_maxp(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl, + double *f) +{ + double maxp = 0; + bayes_index_max(ctx->N, limits_low, limits_high, dl, ctx->index_max); + init_index(ctx->N, ctx->index_estimation); + do + { + index_2_f(ctx->N, ctx->index_estimation, limits_low, dl, ctx->f_estimation); + double p = _bayes_posterior(ctx, + num_frames, F, + ctx->f_estimation, + d, v, K, + apriori, apriori_params, + limits_low, ctx->index_max, dl); + if (p > maxp) + { + memcpy(f, ctx->f_estimation, sizeof(double) * ctx->N); + maxp = p; + } + } while (next_index(ctx->N, ctx->index_max, ctx->index_estimation)); +} + +/** + * @brief Find estimation E[{f_i}] by integral over all {f_i} of p({f_i}|{F_a}) * {f_i} + */ +void bayes_estimate(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl, + double clip, + double *f) +{ + double maxp = 0; + + bayes_index_max(ctx->N, limits_low, limits_high, dl, ctx->index_max); + /* If clip > 0 estimate take into account only points near to maximal position, + where posterior > maximal * clip + */ + if (clip > 0) + { + init_index(ctx->N, ctx->index_estimation); + do + { + index_2_f(ctx->N, ctx->index_estimation, limits_low, dl, ctx->f_estimation); + double p = _bayes_posterior(ctx, + num_frames, F, + ctx->f_estimation, + d, v, K, + apriori, apriori_params, + limits_low, ctx->index_max, dl); + if (p > maxp) + maxp = p; + } while (next_index(ctx->N, ctx->index_max, ctx->index_estimation)); + } + + double dln = pow(dl, ctx->N); + int i; + double sump = 0; + memset(f, 0, ctx->N * sizeof(double)); + 
init_index(ctx->N, ctx->index_estimation); + do + { + index_2_f(ctx->N, ctx->index_estimation, limits_low, dl, ctx->f_estimation); + double p = _bayes_posterior(ctx, + num_frames, F, + ctx->f_estimation, + d, v, K, + apriori, apriori_params, + limits_low, ctx->index_max, dl); + if (p > maxp * clip) + { + for (i = 0; i < ctx->N; i++) + { + f[i] += ctx->f_estimation[i] * p * dln; + sump += p * dln; + } + } + } while (next_index(ctx->N, ctx->index_max, ctx->index_estimation)); + + for (i = 0; i < ctx->N; i++) + { + f[i] /= sump; + } +} + +void bayes_posterior_free(struct bayes_posterior_ctx_s *ctx) +{ + if (ctx->f_integration != NULL) + { + free(ctx->f_integration); + ctx->f_integration = NULL; + } + if (ctx->f_estimation != NULL) + { + free(ctx->f_estimation); + ctx->f_estimation = NULL; + } + if (ctx->index_estimation != NULL) + { + free(ctx->index_estimation); + ctx->index_estimation = NULL; + } + if (ctx->index_integration != NULL) + { + free(ctx->index_integration); + ctx->index_integration = NULL; + } + if (ctx->index_max != NULL) + { + free(ctx->index_max); + ctx->index_max = NULL; + } +} + +bool bayes_posterior_init(struct bayes_posterior_ctx_s *ctx, int num_dim) +{ + ctx->N = num_dim; + ctx->f_integration = calloc(num_dim, sizeof(double)); + ctx->f_estimation = calloc(num_dim, sizeof(double)); + ctx->index_max = calloc(num_dim, sizeof(int)); + ctx->index_estimation = calloc(num_dim, sizeof(int)); + ctx->index_integration = calloc(num_dim, sizeof(int)); + return true; +} diff --git a/src/c_modules/bayes/libbayes/bayes.h b/src/c_modules/bayes/libbayes/bayes.h new file mode 100644 index 0000000..615a1bc --- /dev/null +++ b/src/c_modules/bayes/libbayes/bayes.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2025 Vladislav Tsendrovskii + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 3 of the License. 
+ * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/// @brief function for apriori esimation p({f_i}), i=0....N-1 +typedef double (*apriori_f)(const double *f, int N, const void *param); + +/** @brief parameters for integration + * Estimation approach: + * 1. iterate {f_i} in some region of R+^N + * 2. for each {f_i} find p(f|F), it requieres integration {f'_i} over R+^N, but we cannot integrate over ifinite region, + * so we integrate over some area in R+^N + * 3. select {f_i} with maximal p(f|F) + */ +struct bayes_posterior_ctx_s +{ + int N; ///< number of dimensions of vector {f_i}, i=0..N-1 + int *index_integration; ///< vector for indexing current {f'_i} during integration + int *index_estimation; ///< vector for indexing current {f_i} during searching maximal p + int *index_max; ///< maximal index values for each index + double *f_integration; ///< vector for {f'_i} during integration + double *f_estimation; ///< vector for keeping {f_i} with maximal p(f|F) during estimation +}; + +/** + * @brief Estimate posterior probability p({p_i}|{F_a}). 
Here 'i' for components indexing, 'a' for frame indexing + * @param ctx - context + * @param num_frames - number of measurements + * @param F - frames {F_a}, a=0..num_frames-1 + * @param f - {f_i} for which we find p({f_i}|{F_a}) + * @param d - constant coefficients {d_i}, for dark, sky + * @param v - spatial coefficient {v_i}, for flat + * @param K - contribution coefficients {K_i,a} + * @param apriori - apriori p({f_i}) + * @param apriori_params - aux params for apriori function + * @param limits_low - lower limit for integration + * @param limits_high - higher limits for integration + * @param df - integration step + * @return p({p_i} | {F_a}) + */ +double bayes_posterior(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *f, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl); + +/** + * @brief Find estimation E[{f_i}] by integral over all {f_i} of p({f_i}|{F_a}) * {f_i} + * @param ctx - context + * @param num_frames - number of measurements + * @param F - frames {F_a}, a=0..num_frames-1 + * @param d - constant coefficients {d_i}, for dark, sky + * @param v - spatial coefficient {v_i}, for flat + * @param K - contribution coefficients {K_i,a} + * @param apriori - apriori p({f_i}) + * @param apriori_params - aux params for apriori function + * @param limits_low - lower limit for integration + * @param limits_high - higher limits for integration + * @param dl - integration step + * @param clip - only use points where posterior > max posterior * clip + * @param f - estimated value + */ +void bayes_estimate(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl, + double clip, + double *f); + +/** + 
* @brief Find such {f_i} where p({f_i} | {F_a}) is maximal + * @param ctx - context + * @param num_frames - number of measurements + * @param F - frames {F_a}, a=0..num_frames-1 + * @param d - constant coefficients {d_i}, for dark, sky + * @param v - spatial coefficient {v_i}, for flat + * @param K - contribution coefficients {K_i,a} + * @param apriori - apriori p({f_i}) + * @param apriori_params - aux params for apriori function + * @param limits_low - lower limit for integration + * @param limits_high - higher limits for integration + * @param dl - integration step + * @param f - estimated value + */ +void bayes_maxp(struct bayes_posterior_ctx_s *ctx, + int num_frames, + const uint64_t *F, + const double *d, + const double *v, + const double *K, + const apriori_f apriori, + const void *apriori_params, + const double *limits_low, + const double *limits_high, + double dl, + double *f); + +void bayes_posterior_free(struct bayes_posterior_ctx_s *ctx); +bool bayes_posterior_init(struct bayes_posterior_ctx_s *ctx, int num_dim); + +#ifdef __cplusplus +} +#endif diff --git a/src/c_modules/bayes/libbayes/tests/CMakeLists.txt b/src/c_modules/bayes/libbayes/tests/CMakeLists.txt new file mode 100644 index 0000000..8385ee8 --- /dev/null +++ b/src/c_modules/bayes/libbayes/tests/CMakeLists.txt @@ -0,0 +1,29 @@ +cmake_minimum_required(VERSION 3.14) + +if (BUILD_TESTS) + set(INSTALL_GTEST OFF) + include(FetchContent) + FetchContent_Declare( + googletest + URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip + ) + + # For Windows: Prevent overriding the parent project's compiler/linker settings + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + FetchContent_MakeAvailable(googletest) + + enable_testing() + add_executable( + bayes_test + bayes.cc + ) + + target_link_libraries( + bayes_test + GTest::gtest_main + bayes + ) + + include(GoogleTest) + gtest_discover_tests(bayes_test) +endif() diff --git 
a/src/c_modules/bayes/libbayes/tests/bayes.cc b/src/c_modules/bayes/libbayes/tests/bayes.cc new file mode 100644 index 0000000..29cd08a --- /dev/null +++ b/src/c_modules/bayes/libbayes/tests/bayes.cc @@ -0,0 +1,31 @@ +#include +#include + +extern "C" double + +TEST(dim_1, aposteriori) { + struct bayes_posterior_ctx_s ctx; + bayes_posterior_init(&ctx, 1); + + uint64_t F[] = {1, 1, 1, 1, 1}; // Samples + double f[1] = {1}; // target for which find aposteriori + double d[1] = {0}; // darks + double v[1] = {1}; // flats + double Ks[1] = {1}; // K + double lows[1] = {0}; + double highs[1] = {10}; + double dl = 0.1; + + auto apriori = [](const double *f, int N, const void *param) -> double + { + if (f[0] < 0) + return 0; + if (f[0] > 10) + return 0; + return 1.0/10; + }; + + double posterior = bayes_posterior(&ctx, sizeof(F)/sizeof(F[0]), F, f, d, v, Ks, apriori, NULL, lows, highs, dl); + + bayes_posterior_free(&ctx); +} diff --git a/src/c_modules/bayes/module.c b/src/c_modules/bayes/module.c new file mode 100644 index 0000000..848befe --- /dev/null +++ b/src/c_modules/bayes/module.c @@ -0,0 +1,664 @@ +/* + * Copyright (c) 2025 Vladislav Tsendrovskii + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 3 of the License. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#define PY_SSIZE_T_CLEAN +#include +#include + +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#include +#include + +#include +#include + +#include "bayes.h" + +#define BASENAME "vstarstack.library.bayes.bayes" + +struct apriori_params_s +{ + PyObject *apriori; + PyObject *param_dict; + double max_f; +}; + +enum BayesAprioriType_e +{ + APRIORI_UNKNOWN = 0, + APRIORI_CALLABLE, + APRIORI_UNIFORM, + APRIORI_GAMMA, +}; + +struct BayesEstimatorObject +{ + PyObject_HEAD PyObject *apriori_callable_object; + apriori_f apriori; + double dl; + enum BayesAprioriType_e apriori_type; + int num_dim; + struct bayes_posterior_ctx_s ctx; +}; + +double call_apriori(const double *f, int num_dim, const void *param) +{ + const struct apriori_params_s *params_s = param; + + PyObject *apriori = params_s->apriori; + PyObject *param_dict = params_s->param_dict; + + npy_intp dims[1] = {num_dim}; + + PyObject *f_ndarray = PyArray_SimpleNewFromData( + 1, // ndim + dims, // dimensions + NPY_DOUBLE, // dtype + (void *)f // pointer to your double data + ); + + if (!f_ndarray) + { + PyErr_SetString(PyExc_RuntimeError, "Failed to create NumPy array"); + return 0; + } + + PyObject *args = PyTuple_New(2); + Py_INCREF(f_ndarray); // Tuple steals a reference, so we must increment + Py_INCREF(param_dict); // Tuple steals a reference, so we must increment + PyTuple_SET_ITEM(args, 0, (PyObject *)f_ndarray); + PyTuple_SET_ITEM(args, 1, (PyObject *)param_dict); + + PyObject *result = PyObject_CallObject(apriori, args); + Py_DECREF(args); // Done with args tuple + Py_DECREF(f_ndarray); + + if (!result) + { + PyErr_SetString(PyExc_RuntimeError, "Call to apriori function failed"); + return 0; + } + + double apriori_val = PyFloat_AsDouble(result); + Py_DECREF(result); + + if (PyErr_Occurred()) + { + PyErr_SetString(PyExc_TypeError, "apriori function did not return a double"); + return 0; + } + + return apriori_val; +} + +double uniform_apriori(const double *f, int num_dim, const void *param) +{ + 
return 1; +} + +static bool validate_shape(const PyArrayObject *array, int ndim, ...) +{ + if (PyArray_NDIM(array) != ndim) + return false; + + va_list args; + va_start(args, ndim); + int i; + for (i = 0; i < ndim; i++) + { + int expected_dim = va_arg(args, int); + int actual_dim = PyArray_DIM(array, i); + if (actual_dim != expected_dim) + { + va_end(args); + return false; + } + } + va_end(args); + return true; +} + +static PyObject *posterior(PyObject *_self, + PyObject *args, + PyObject *kwds) +{ + PyObject *F; + PyObject *f; + PyObject *lambdas_d, *lambdas_v, *lambdas_K; + PyObject *apriori_params; + PyObject *limits_low, *limits_high; + + struct BayesEstimatorObject *self = (struct BayesEstimatorObject *)_self; + + static char *kwlist[] = {"F", "f", "lambdas_d", "lambdas_v", "lambdas_K", "apriori_params", "limits_low", "limits_high", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOOO", kwlist, + &F, + &f, + &lambdas_d, &lambdas_v, &lambdas_K, + &apriori_params, + &limits_low, &limits_high)) + { + Py_INCREF(Py_None); + return Py_None; + } + + PyArrayObject *arr_F = (PyArrayObject *)PyArray_FROM_OTF(F, NPY_UINT64, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_f = (PyArrayObject *)PyArray_FROM_OTF(f, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_d = (PyArrayObject *)PyArray_FROM_OTF(lambdas_d, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_v = (PyArrayObject *)PyArray_FROM_OTF(lambdas_v, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_K = (PyArrayObject *)PyArray_FROM_OTF(lambdas_K, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_low = (PyArrayObject *)PyArray_FROM_OTF(limits_low, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_high = (PyArrayObject *)PyArray_FROM_OTF(limits_high, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + + if (!arr_F || !arr_f || !arr_lambdas_d || !arr_lambdas_v || !arr_limits_low || !arr_limits_high) + { + PyErr_SetString(PyExc_TypeError, "Failed to convert inputs to NumPy 
arrays"); + goto fail; + } + + // arr_F: [num_frames] + if (PyArray_NDIM(arr_F) != 1) + { + PyErr_SetString(PyExc_ValueError, "F must be a 1D array"); + goto fail; + } + int num_frames = PyArray_DIM(arr_F, 0); + + // arr_f: [num_frames] + if (!validate_shape(arr_f, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "f must be shape [num_dim]"); + goto fail; + } + + // arr_lambdas_d: [num_dim] + if (!validate_shape(arr_lambdas_d, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_d must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_v: [num_frames] + if (!validate_shape(arr_lambdas_v, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_v must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_K: [num_frames, num_dim] + if (!validate_shape(arr_lambdas_K, 2, num_frames, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_K must be shape [num_frames, num_dim]"); + goto fail; + } + + // arr_limits_low: [num_dim] + if (!validate_shape(arr_limits_low, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_low must be shape [num_dim]"); + goto fail; + } + + // arr_limits_high: [num_dim] + if (!validate_shape(arr_limits_high, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_high must be shape [num_dim]"); + goto fail; + } + + uint64_t *F_data = (uint64_t *)PyArray_DATA(arr_F); + double *f_data = (double *)PyArray_DATA(arr_f); + double *lambdas_d_data = (double *)PyArray_DATA(arr_lambdas_d); + double *lambdas_v_data = (double *)PyArray_DATA(arr_lambdas_v); + double *lambdas_K_data = (double *)PyArray_DATA(arr_lambdas_K); + double *limits_low_data = (double *)PyArray_DATA(arr_limits_low); + double *limits_high_data = (double *)PyArray_DATA(arr_limits_high); + + struct apriori_params_s params; + switch (self->apriori_type) + { + case APRIORI_CALLABLE: + params.apriori = self->apriori_callable_object; + params.param_dict = apriori_params; + break; + case APRIORI_UNIFORM: + 
break; + default: + PyErr_SetString(PyExc_TypeError, "Invalid apriori type"); + goto fail; + } + + double p = bayes_posterior(&self->ctx, + num_frames, + F_data, + f_data, + lambdas_d_data, + lambdas_v_data, + lambdas_K_data, + self->apriori, ¶ms, + limits_low_data, + limits_high_data, + self->dl); + + Py_DECREF(arr_F); + Py_DECREF(arr_f); + Py_DECREF(arr_lambdas_d); + Py_DECREF(arr_lambdas_v); + Py_DECREF(arr_limits_low); + Py_DECREF(arr_limits_high); + return PyFloat_FromDouble(p); +fail: + Py_XDECREF(arr_F); + Py_XDECREF(arr_f); + Py_XDECREF(arr_lambdas_d); + Py_XDECREF(arr_lambdas_v); + Py_XDECREF(arr_limits_low); + Py_XDECREF(arr_limits_high); + return NULL; +} + +static PyObject *maxp(PyObject *_self, + PyObject *args, + PyObject *kwds) +{ + PyObject *F; + PyObject *lambdas_d, *lambdas_v, *lambdas_K; + PyObject *apriori_params; + PyObject *limits_low, *limits_high; + + struct BayesEstimatorObject *self = (struct BayesEstimatorObject *)_self; + + static char *kwlist[] = {"F", "lambdas_d", "lambdas_v", "lambdas_K", "apriori_params", "limits_low", "limits_high", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOO", kwlist, + &F, + &lambdas_d, &lambdas_v, &lambdas_K, + &apriori_params, + &limits_low, &limits_high)) + { + Py_INCREF(Py_None); + return Py_None; + } + + PyArrayObject *arr_F = (PyArrayObject *)PyArray_FROM_OTF(F, NPY_UINT64, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_d = (PyArrayObject *)PyArray_FROM_OTF(lambdas_d, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_v = (PyArrayObject *)PyArray_FROM_OTF(lambdas_v, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_K = (PyArrayObject *)PyArray_FROM_OTF(lambdas_K, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_low = (PyArrayObject *)PyArray_FROM_OTF(limits_low, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_high = (PyArrayObject *)PyArray_FROM_OTF(limits_high, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + + if (!arr_F || !arr_lambdas_d || 
!arr_lambdas_v || !arr_limits_low || !arr_limits_high) + { + PyErr_SetString(PyExc_TypeError, "Failed to convert inputs to NumPy arrays"); + goto fail; + } + + + // arr_F: [num_frames] + if (PyArray_NDIM(arr_F) != 1) + { + PyErr_SetString(PyExc_ValueError, "F must be a 1D array"); + goto fail; + } + int num_frames = PyArray_DIM(arr_F, 0); + + // arr_lambdas_d: [num_frames] + if (!validate_shape(arr_lambdas_d, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_d must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_v: [num_frames] + if (!validate_shape(arr_lambdas_v, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_v must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_K: [num_frames, num_dim] + if (!validate_shape(arr_lambdas_K, 2, num_frames, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_K must be shape [num_frames, num_dim]"); + goto fail; + } + + // arr_limits_low: [num_dim] + if (!validate_shape(arr_limits_low, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_low must be shape [num_dim]"); + goto fail; + } + + // arr_limits_high: [num_dim] + if (!validate_shape(arr_limits_high, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_high must be shape [num_dim]"); + goto fail; + } + + uint64_t *F_data = (uint64_t *)PyArray_DATA(arr_F); + double *lambdas_d_data = (double *)PyArray_DATA(arr_lambdas_d); + double *lambdas_v_data = (double *)PyArray_DATA(arr_lambdas_v); + double *lambdas_K_data = (double *)PyArray_DATA(arr_lambdas_K); + double *limits_low_data = (double *)PyArray_DATA(arr_limits_low); + double *limits_high_data = (double *)PyArray_DATA(arr_limits_high); + + struct apriori_params_s params; + switch (self->apriori_type) + { + case APRIORI_CALLABLE: + params.apriori = self->apriori_callable_object; + params.param_dict = apriori_params; + break; + case APRIORI_UNIFORM: + break; + default: + PyErr_SetString(PyExc_TypeError, "Invalid apriori type"); + 
goto fail; + } + + npy_intp dims[1] = {self->num_dim}; + PyArrayObject *f_ndarray = (PyArrayObject *)PyArray_SimpleNew( + 1, // ndim + dims, // dimensions + NPY_DOUBLE // dtype + ); + + double *f = (double *)PyArray_DATA(f_ndarray); + bayes_maxp(&self->ctx, + num_frames, + F_data, + lambdas_d_data, + lambdas_v_data, + lambdas_K_data, + self->apriori, ¶ms, + limits_low_data, + limits_high_data, + self->dl, + f); + Py_DECREF(arr_F); + Py_DECREF(arr_lambdas_d); + Py_DECREF(arr_lambdas_v); + Py_DECREF(arr_limits_low); + Py_DECREF(arr_limits_high); + return (PyObject *)f_ndarray; +fail: + Py_XDECREF(arr_F); + Py_XDECREF(arr_lambdas_d); + Py_XDECREF(arr_lambdas_v); + Py_XDECREF(arr_limits_low); + Py_XDECREF(arr_limits_high); + return NULL; +} + +static PyObject *estimate(PyObject *_self, + PyObject *args, + PyObject *kwds) +{ + PyObject *F; + PyObject *lambdas_d, *lambdas_v, *lambdas_K; + PyObject *apriori_params; + PyObject *limits_low, *limits_high; + double clip; + + struct BayesEstimatorObject *self = (struct BayesEstimatorObject *)_self; + + static char *kwlist[] = {"F", "lambdas_d", "lambdas_v", "lambdas_K", "apriori_params", "limits_low", "limits_high", "clip", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOOd", kwlist, + &F, + &lambdas_d, &lambdas_v, &lambdas_K, + &apriori_params, + &limits_low, &limits_high, + &clip)) + { + Py_INCREF(Py_None); + return Py_None; + } + + PyArrayObject *arr_F = (PyArrayObject *)PyArray_FROM_OTF(F, NPY_UINT64, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_d = (PyArrayObject *)PyArray_FROM_OTF(lambdas_d, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_v = (PyArrayObject *)PyArray_FROM_OTF(lambdas_v, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_lambdas_K = (PyArrayObject *)PyArray_FROM_OTF(lambdas_K, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_low = (PyArrayObject *)PyArray_FROM_OTF(limits_low, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + PyArrayObject *arr_limits_high = (PyArrayObject 
*)PyArray_FROM_OTF(limits_high, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); + + if (!arr_F || !arr_lambdas_d || !arr_lambdas_v || !arr_limits_low || !arr_limits_high) + { + PyErr_SetString(PyExc_TypeError, "Failed to convert inputs to NumPy arrays"); + goto fail; + } + + // arr_F: [num_frames] + if (PyArray_NDIM(arr_F) != 1) + { + PyErr_SetString(PyExc_ValueError, "F must be a 1D array"); + goto fail; + } + int num_frames = PyArray_DIM(arr_F, 0); + + // arr_lambdas_d: [num_frames] + if (!validate_shape(arr_lambdas_d, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_d must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_v: [num_frames] + if (!validate_shape(arr_lambdas_v, 1, num_frames)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_v must be shape [num_frames]"); + goto fail; + } + + // arr_lambdas_K: [num_frames, num_dim] + if (!validate_shape(arr_lambdas_K, 2, num_frames, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "lambdas_K must be shape [num_frames, num_dim]"); + goto fail; + } + + // arr_limits_low: [num_dim] + if (!validate_shape(arr_limits_low, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_low must be shape [num_dim]"); + goto fail; + } + + // arr_limits_high: [num_dim] + if (!validate_shape(arr_limits_high, 1, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "limits_high must be shape [num_dim]"); + goto fail; + } + + uint64_t *F_data = (uint64_t *)PyArray_DATA(arr_F); + double *lambdas_d_data = (double *)PyArray_DATA(arr_lambdas_d); + double *lambdas_v_data = (double *)PyArray_DATA(arr_lambdas_v); + double *lambdas_K_data = (double *)PyArray_DATA(arr_lambdas_K); + double *limits_low_data = (double *)PyArray_DATA(arr_limits_low); + double *limits_high_data = (double *)PyArray_DATA(arr_limits_high); + + struct apriori_params_s params; + switch (self->apriori_type) + { + case APRIORI_CALLABLE: + params.apriori = self->apriori_callable_object; + params.param_dict = apriori_params; + break; + case 
APRIORI_UNIFORM: + break; + default: + PyErr_SetString(PyExc_TypeError, "Invalid apriori type"); + goto fail; + } + + npy_intp dims[1] = {self->num_dim}; + + PyArrayObject *f_ndarray = (PyArrayObject *)PyArray_SimpleNew( + 1, // ndim + dims, // dimensions + NPY_DOUBLE // dtype + ); + + if (clip > 1) + clip = 1; + if (clip < 0) + clip = 0; + + double *f = (double *)PyArray_DATA(f_ndarray); + bayes_estimate(&self->ctx, + num_frames, + F_data, + lambdas_d_data, + lambdas_v_data, + lambdas_K_data, + self->apriori, ¶ms, + limits_low_data, + limits_high_data, + self->dl, clip, + f); + Py_DECREF(arr_F); + Py_DECREF(arr_lambdas_d); + Py_DECREF(arr_lambdas_v); + Py_DECREF(arr_limits_low); + Py_DECREF(arr_limits_high); + return (PyObject *)f_ndarray; +fail: + Py_XDECREF(arr_F); + Py_XDECREF(arr_lambdas_d); + Py_XDECREF(arr_lambdas_v); + Py_XDECREF(arr_limits_low); + Py_XDECREF(arr_limits_high); + return NULL; +} + +static int BayesEstimator_init(PyObject *_self, PyObject *args, PyObject *kwds) +{ + PyObject *apriori; + double dl; + int num_dim; + + static char *kwlist[] = {"apriori", "dl", "ndim", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "Odi", kwlist, &apriori, &dl, &num_dim)) + { + return -1; + } + struct BayesEstimatorObject *self = (struct BayesEstimatorObject *)_self; + + self->apriori_callable_object = NULL; + self->apriori_type = APRIORI_UNKNOWN; + self->dl = dl; + self->num_dim = num_dim; + + // Callable apriori + if (PyCallable_Check(apriori)) + { + self->apriori = call_apriori; + self->apriori_type = APRIORI_CALLABLE; + Py_INCREF(apriori); + self->apriori_callable_object = apriori; + } + + // Uniform apriori + if (PyUnicode_Check(apriori) && !strcmp(PyUnicode_AsUTF8(apriori), "uniform")) + { + self->apriori = uniform_apriori; + self->apriori_type = APRIORI_UNIFORM; + } + + if (self->apriori_type == APRIORI_UNKNOWN) + { + return -1; + } + + if (!bayes_posterior_init(&self->ctx, self->num_dim)) + { + PyErr_SetString(PyExc_ValueError, "initialization 
error"); + return -1; + } + + return 0; +} + +static void BayesEstimator_dealloc(PyObject *_self) +{ + struct BayesEstimatorObject *self = + (struct BayesEstimatorObject *)_self; + bayes_posterior_free(&self->ctx); + if (self->apriori_callable_object) + { + Py_DECREF(self->apriori_callable_object); + self->apriori_callable_object = NULL; + } +} + +static PyMethodDef BayesEstimator_methods[] = { + {"posterior", (PyCFunction)posterior, METH_VARARGS | METH_KEYWORDS, + "Build Bayes posterior"}, + + {"MAP", (PyCFunction)maxp, METH_VARARGS | METH_KEYWORDS, + "Bayes MAP value"}, + + {"estimate", (PyCFunction)estimate, METH_VARARGS | METH_KEYWORDS, + "Bayes estimate mean value"}, + + {NULL} /* Sentinel */ +}; + +static PyTypeObject bayesEstimator = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = BASENAME ".Estimator", + .tp_doc = PyDoc_STR("Bayes estimator object"), + .tp_basicsize = sizeof(struct BayesEstimatorObject), + .tp_itemsize = 0, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_new = PyType_GenericNew, + .tp_init = BayesEstimator_init, + .tp_dealloc = BayesEstimator_dealloc, + .tp_methods = BayesEstimator_methods, +}; + +static PyMethodDef bayes_methods[] = { + {NULL} /* Sentinel */ +}; + +static PyModuleDef bayesModule = { + PyModuleDef_HEAD_INIT, + .m_name = BASENAME, + .m_doc = "Bayes estimation for Poisson distribution", + .m_size = -1, + .m_methods = bayes_methods, +}; + +PyMODINIT_FUNC +PyInit_bayes(void) +{ + import_array(); + if (PyType_Ready(&bayesEstimator) < 0) + return NULL; + + PyObject *m = PyModule_Create(&bayesModule); + if (m == NULL) + return NULL; + + Py_INCREF(&bayesEstimator); + if (PyModule_AddObject(m, "BayesEstimator", (PyObject *)&bayesEstimator) < 0) + { + Py_DECREF(&bayesEstimator); + Py_DECREF(m); + return NULL; + } + + return m; +} diff --git a/src/vstarstack/library/bayes/estimation.py b/src/vstarstack/library/bayes/estimation.py new file mode 100644 index 0000000..6af5da2 --- /dev/null +++ b/src/vstarstack/library/bayes/estimation.py @@ -0,0 
+1,300 @@ +"""Bayes estimation methods""" +# +# Copyright (c) 2025 Vladislav Tsendrovskii +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3 of the License. +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License for more details. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from typing import Callable, Iterator, Tuple +import numpy as np +import vstarstack.library.bayes +import vstarstack.library.bayes.bayes + +def _generate_crds(H : int, W : int) -> Iterator[Tuple[int, int]]: + for y in range(H): + for x in range(W): + yield y,x + +def estimate(samples : np.ndarray, + Ks : np.ndarray, + background : np.ndarray, + flat : np.ndarray, + max_signal : np.ndarray, + estimator : vstarstack.library.bayes.bayes.BayesEstimator, + apriori_fun_params : any = None, + clip : float = 0, + method : str = "estimate") -> np.ndarray: + """ + Estimate value with Bayes theorem + + samples - (nsamples, h, w) - + Ks - (nsamples, ndim) - + background - (nsamples, h, w) - + flat - (nsamples, h, w) - + max_signal - (ndim) - + + estimator - + apriori_fun_params - + clip - + method - + + """ + + # lambda(f1, f2) = background + flat * (Ks_1 * f1 + Ks_2 * f2) + + assert len(max_signal.shape) == 1 + ndim = max_signal.shape[0] + max_signal = max_signal.astype(np.double) + min_signal = np.zeros(max_signal.shape, dtype=np.double) + + assert len(samples.shape) == 3 + nsamples = samples.shape[0] + h = samples.shape[1] + w = samples.shape[2] + samples = samples.astype(np.uint) + + assert len(background.shape) == 3 + assert background.shape[0] == nsamples + assert background.shape[1] == h + assert background.shape[2] == w + 
background = background.astype(np.double) + + assert len(flat.shape) == 3 + assert flat.shape[0] == nsamples + assert flat.shape[1] == h + assert flat.shape[2] == w + background = background.astype(np.double) + + assert len(Ks.shape) == 2 + assert Ks.shape[0] == nsamples + assert Ks.shape[1] == ndim + flat = flat.astype(np.double) + + npixels = w*h + samples = np.reshape(samples, (nsamples, npixels)) + background = np.reshape(background, (nsamples, npixels)) + flat = np.reshape(flat, (nsamples, npixels)) + + result = np.ndarray((npixels, ndim)) + if method == "estimate": + for i in range(npixels): + f = estimator.estimate(samples[:,i], + background[:,i], + flat[:,i], + Ks, + apriori_fun_params, + limits_low=min_signal, + limits_high=max_signal, + clip=clip) + result[i,:] = f + elif method == "MAP": + for i in range(npixels): + f = estimator.MAP(samples[:,i], + background[:,i], + flat[:,i], + Ks, + apriori_fun_params, + limits_low=min_signal, + limits_high=max_signal) + result[i,:] = f + result = np.reshape(result, (h, w, ndim)) + return result + +def estimate_with_dark_flat(samples : np.ndarray, + dark : np.ndarray, + flat : np.ndarray, + max_signal : float, + integration_dl : float, + apriori_fun : any, + apriori_fun_params : any = None, + clip : float = 0, + method : str = "estimate") -> np.ndarray: + assert len(samples.shape) == 3 + nsamples = samples.shape[0] + + # lambda(f) = dark + flat * 1 * f + + if len(dark.shape) == 2: + dks = np.zeros((nsamples, dark.shape[0], dark.shape[0])) + for i in range(nsamples): + dks[i,:,:] = dark + elif len(dark.shape) == 3: + dks = dark + else: + return None + + if len(flat.shape) == 2: + flts = np.zeros((nsamples, flat.shape[0], flat.shape[0])) + for i in range(nsamples): + flts[i,:,:] = flat + elif len(flat.shape) == 3: + flts = flat + else: + return None + + Ks = np.ones((nsamples, 1)) + + _max_signal = np.array([max_signal], dtype=np.double) + + estimator = vstarstack.library.bayes.bayes.BayesEstimator(apriori=apriori_fun, 
dl=integration_dl, ndim=1) + return estimate(samples, Ks, dks, flts, _max_signal, estimator, apriori_fun_params, clip, method) + +def estimate_with_dark_flat_sky(samples : np.ndarray, + dark : np.ndarray, + flat : np.ndarray, + sky : np.ndarray, + max_signal : float, + integration_dl : float, + apriori_fun : any, + apriori_fun_params : any = None, + clip : float = 0, + method : str = "estimate") -> np.ndarray: + assert len(samples.shape) == 3 + nsamples = samples.shape[0] + + # lambda(f) = (dark + flat * sky) + flat * 1 * f + + if len(dark.shape) == 2: + darks = np.zeros((nsamples, dark.shape[0], dark.shape[0])) + for i in range(nsamples): + darks[i,:,:] = dark + elif len(dark.shape) == 3: + darks = dark + else: + return None + + if len(flat.shape) == 2: + flts = np.zeros((nsamples, flat.shape[0], flat.shape[0])) + for i in range(nsamples): + flts[i,:,:] = flat + elif len(flat.shape) == 3: + flts = flat + else: + return None + + if len(sky.shape) == 2: + skies = np.zeros((nsamples, sky.shape[0], sky.shape[0])) + for i in range(nsamples): + skies[i,:,:] = sky + elif len(sky.shape) == 3: + skies = sky + else: + return None + + bgs = darks + skies * flts + + Ks = np.ones((nsamples, 1)) + + _max_signal = np.array([max_signal], dtype=np.double) + + estimator = vstarstack.library.bayes.bayes.BayesEstimator(apriori=apriori_fun, dl=integration_dl, ndim=1) + return estimate(samples, Ks, bgs, flts, _max_signal, estimator, apriori_fun_params, clip, method) + +def estimate_with_dark_flat_sky_continuum(samples_narrow : np.ndarray, + dark_narrow : np.ndarray, + flat_narrow : np.ndarray, + sky_narrow : np.ndarray, + max_signal_emission : float, + samples_wide : np.ndarray, + dark_wide : np.ndarray, + flat_wide : np.ndarray, + sky_wide : np.ndarray, + max_signal_continuum : float, + wide_narrow_k : float, + integration_dl : float, + apriori_fun : str | Callable[[np.ndarray], float], + apriori_fun_params : any = None, + clip : float = 0, + method : str = "estimate") -> np.ndarray: 
+ assert len(samples_wide.shape) == 3 + assert len(samples_narrow.shape) == 3 + nsamples_wide = samples_wide.shape[0] + nsamples_narrow = samples_narrow.shape[0] + + # lambda_narrow(f) = (dark_narrow + flat_narrow * sky_narrow) + flat_narrow * (1 * f_continuum + 1 * f_emission) + # lambda_wide(f) = (dark_wide + flat_wide * sky_wide) + flat_wide * (k * f_continuum + 1 * f_emission) + + # f = [f_continuum f_emission] + + # prepare parameters for images with wide filter + if len(dark_wide.shape) == 2: + darks_wide = np.zeros((nsamples_wide, dark_wide.shape[0], dark_wide.shape[0])) + for i in range(nsamples_wide): + darks_wide[i,:,:] = dark_wide + elif len(dark_wide.shape) == 3: + darks_wide = dark_wide + else: + return None + + if len(flat_wide.shape) == 2: + flats_wide = np.zeros((nsamples_wide, flat_wide.shape[0], flat_wide.shape[0])) + for i in range(nsamples_wide): + flats_wide[i,:,:] = flat_wide + elif len(flat_wide.shape) == 3: + flats_wide = flat_wide + else: + return None + + if len(sky_wide.shape) == 2: + skies_wide = np.zeros((nsamples_wide, sky_wide.shape[0], sky_wide.shape[0])) + for i in range(nsamples_wide): + skies_wide[i,:,:] = sky_wide + elif len(sky_wide.shape) == 3: + skies_wide = sky_wide + else: + return None + + bgs_wide = darks_wide + skies_wide * flats_wide + + Ks_wide = np.ones((nsamples_wide, 2)) + + # prepare parameters for images with narrow filter + if len(dark_narrow.shape) == 2: + darks_narrow = np.zeros((nsamples_narrow, dark_narrow.shape[0], dark_narrow.shape[0])) + for i in range(nsamples_narrow): + darks_narrow[i,:,:] = dark_narrow + elif len(dark_narrow.shape) == 3: + darks_narrow = dark_narrow + else: + return None + + if len(flat_narrow.shape) == 2: + flats_narrow = np.zeros((nsamples_narrow, flat_narrow.shape[0], flat_narrow.shape[0])) + for i in range(nsamples_narrow): + flats_narrow[i,:,:] = flat_narrow + elif len(flat_narrow.shape) == 3: + flats_narrow = flat_narrow + else: + return None + + if len(sky_narrow.shape) == 2: + 
skies_narrow = np.zeros((nsamples_narrow, sky_narrow.shape[0], sky_narrow.shape[0])) + for i in range(nsamples_narrow): + skies_narrow[i,:,:] = sky_narrow + elif len(sky_narrow.shape) == 3: + skies_narrow = sky_narrow + else: + return None + + bgs_narrow = darks_narrow + skies_narrow * flats_narrow + + Ks_narrow = np.zeros((nsamples_narrow, 2)) + Ks_narrow[:,0] = wide_narrow_k + Ks_narrow[:,1] = 1 + + # concat all samples into single array + samples = np.concat([samples_wide, samples_narrow], axis=0) + bgs = np.concat([bgs_wide, bgs_narrow], axis=0) + flats = np.concat([flats_wide, flats_narrow], axis=0) + Ks = np.concat([Ks_wide, Ks_narrow], axis=0) + max_signal = np.array([max_signal_emission, max_signal_continuum]) + estimator = vstarstack.library.bayes.bayes.BayesEstimator(apriori=apriori_fun, dl=integration_dl, ndim=2) + return estimate(samples, Ks, bgs, flats, max_signal, estimator, apriori_fun_params, clip, method) diff --git a/src/vstarstack/library/movement/move_image.py b/src/vstarstack/library/movement/move_image.py index 468bcda..0f845b6 100644 --- a/src/vstarstack/library/movement/move_image.py +++ b/src/vstarstack/library/movement/move_image.py @@ -118,7 +118,7 @@ def move_dataframe(dataframe: DataFrame, if not dataframe.get_channel_option(channel, "signal"): continue - if channel in dataframe.links["weight"]: + if "weight" in dataframe.links and channel in dataframe.links["weight"]: weight_channel = dataframe.links["weight"][channel] weight, _ = dataframe.get_channel(weight_channel) else: diff --git a/src/vstarstack/tool/image_processing/fixes.py b/src/vstarstack/tool/image_processing/fixes.py index d448201..0f5d0ee 100644 --- a/src/vstarstack/tool/image_processing/fixes.py +++ b/src/vstarstack/tool/image_processing/fixes.py @@ -53,5 +53,5 @@ def copy(project: vstarstack.tool.cfg.Project, argv: list): "blur": (vstarstack.tool.image_processing.blur.run, "gaussian blur"), "deconvolution": (vstarstack.tool.image_processing.deconvolution.commands, 
"deconvolution"), "select-sharp" : (vstarstack.tool.image_processing.drop_unsharp.commands, "select sharp images"), - "remove-continuum" : (vstarstack.tool.image_processing.remove_continuum.process, "remove continuum", "input.zip Narrow Wide output.zip"), + "remove-continuum" : (vstarstack.tool.image_processing.remove_continuum.process, "remove continuum", "input.zip Narrow Wide output.zip [coeff]"), } diff --git a/tests/test_bayes.py b/tests/test_bayes.py new file mode 100644 index 0000000..20ec06b --- /dev/null +++ b/tests/test_bayes.py @@ -0,0 +1,194 @@ +# +# Copyright (c) 2025 Vladislav Tsendrovskii +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3 of the License. +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License for more details. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import numpy as np +from vstarstack.library.bayes.bayes import BayesEstimator +from vstarstack.library.bayes.estimation import * + +def test_MAP_single_value(): + f_signal_max = 10 + dl = 0.1 + dark = 5 + signal = 3 + v = 1 + K = 1 + samples = np.array([dark + signal * v * K], dtype=np.uint64) + + low = np.array([0], dtype=np.double) + high = np.array([f_signal_max], dtype=np.double) + estimator = BayesEstimator(apriori = "uniform", dl=dl, ndim=1) + lambdas_K = np.array([[1]], dtype=np.double) + lambdas_v = np.array([1], dtype=np.double) + lambdas_d = np.array([5], dtype=np.double) + + f = estimator.MAP(F=samples, + lambdas_d=lambdas_d, + lambdas_v=lambdas_v, + lambdas_K=lambdas_K, + apriori_params=None, + limits_low=low, + limits_high=high) + assert f == signal + +def test_MAP_1(): + background = np.zeros((5,5)) + flat = np.ones((5,5)) + true_signal = np.ones((5,5))*4 + + nsamples = 10 + samples = np.zeros((nsamples, 5, 5)) + backgrounds = np.zeros((nsamples, 5, 5)) + flats = np.zeros((nsamples, 5, 5)) + Ks = np.zeros((nsamples, 1)) + for i in range(nsamples): + samples[i,:,:] = true_signal * flat + background + backgrounds[i,:,:] = background + flats[i,:,:] = flat + Ks[i,0] = 1 + + max_signal = np.array([10]) + clip = 0.8 + dl = 0.05 + apriori_params = None + estimator = BayesEstimator(apriori = "uniform", dl=dl, ndim=1) + + estimated = estimate(samples, Ks, backgrounds, flats, max_signal, estimator, apriori_params, clip, "MAP") + assert len(estimated.shape) == 3 + assert estimated.shape[0] == true_signal.shape[0] + assert estimated.shape[1] == true_signal.shape[1] + assert estimated.shape[2] == 1 + estimated = estimated[:,:,0] + assert (estimated == true_signal).all() + +def test_dark_flat_1(): + dark = np.zeros((5,5)) + flat = np.ones((5,5)) + true_signal = np.ones((5,5))*4 + + nsamples = 10 + samples = np.zeros((nsamples, 5, 5)) + for i in range(nsamples): + samples[i,:,:] = true_signal * flat + dark + + max_signal = 10 + clip = 0.8 + dl = 0.05 + + 
apriori_params = None + + estimated = estimate_with_dark_flat(samples, + dark, + flat, + max_signal, + dl, + "uniform", + None, + clip, + "MAP") + + assert len(estimated.shape) == 3 + assert estimated.shape[0] == true_signal.shape[0] + assert estimated.shape[1] == true_signal.shape[1] + assert estimated.shape[2] == 1 + estimated = estimated[:,:,0] + assert (estimated == true_signal).all() + +def test_dark_flat_sky_1(): + dark = np.zeros((5,5)) + sky = np.ones((5,5)) + flat = np.ones((5,5)) + true_signal = np.ones((5,5))*4 + + nsamples = 10 + samples = np.zeros((nsamples, 5, 5)) + for i in range(nsamples): + samples[i,:,:] = true_signal * flat + sky * flat + dark + + max_signal = 10 + clip = 0.8 + dl = 0.05 + + apriori_params = None + + estimated = estimate_with_dark_flat_sky(samples, + dark, + flat, + sky, + max_signal, + dl, + "uniform", + None, + clip, + "MAP") + + assert len(estimated.shape) == 3 + assert estimated.shape[0] == true_signal.shape[0] + assert estimated.shape[1] == true_signal.shape[1] + assert estimated.shape[2] == 1 + estimated = estimated[:,:,0] + assert (estimated == true_signal).all() + +def test_dark_flat_sky_continuum_1(): + h = 1 + w = 1 + dark = np.zeros((h,w)) + sky = np.ones((h,w)) + flat = np.ones((h,w)) + true_signal_emission = np.ones((h,w))*1 + true_signal_continuum = np.ones((h,w))*0.5 + K = 2 + + nsamples_wide = 10 + samples_wide = np.zeros((nsamples_wide, h, w)) + for i in range(nsamples_wide): + samples_wide[i,:,:] = (true_signal_continuum * K + true_signal_emission) * flat + sky * flat + dark + + nsamples_narrow = 10 + samples_narrow = np.zeros((nsamples_narrow, h, w)) + for i in range(nsamples_narrow): + samples_narrow[i,:,:] = (true_signal_continuum + true_signal_emission) * flat + sky * flat + dark + + max_signal = 4 + clip = 0.8 + dl = 0.02 + + apriori_params = None + + estimated = estimate_with_dark_flat_sky_continuum(samples_narrow, + dark, + flat, + sky, + max_signal, + samples_wide, + dark, + flat, + sky, + max_signal, + 
K, + dl, + "uniform", + apriori_params, + clip, + "MAP") + + assert len(estimated.shape) == 3 + assert estimated.shape[0] == true_signal_emission.shape[0] + assert estimated.shape[1] == true_signal_emission.shape[1] + assert estimated.shape[2] == 2 + estimated_continuum = estimated[:,:,0] + estimated_emission = estimated[:,:,1] + print(estimated_continuum) + print(estimated_emission) + assert (estimated_continuum == true_signal_continuum).all() + assert (estimated_emission == true_signal_emission).all()