diff --git a/.github/workflows/build-tests.yml b/.github/workflows/build-tests.yml
index 11dc3093..5c4bbb13 100644
--- a/.github/workflows/build-tests.yml
+++ b/.github/workflows/build-tests.yml
@@ -23,26 +23,40 @@ jobs:
shell: bash --login {0}
run: unzip TestData.zip
- - name: Set up conda environment
+ - name: Set up conda (Miniconda only)
uses: conda-incubator/setup-miniconda@v2
with:
- activate-environment: contextsv
- environment-file: environment.yml
- python-version: 3.9
- auto-activate-base: false
+ auto-activate-base: true
- - name: Install samtools and bcftools using sudo apt-get
+ - name: Configure conda channels and create environment
+ shell: bash -l {0}
run: |
- sudo apt-get update
- sudo apt-get install -y samtools bcftools
+ conda config --remove channels defaults || true
+ conda config --add channels conda-forge
+ conda config --add channels bioconda
+ conda config --set channel_priority strict
+ conda info # confirm the change
+ conda env create -f environment.yml
- name: Build C++ code
shell: bash --login {0} # --login so the conda-initialized PATH is available
run: |
- make
+ source $(conda info --base)/etc/profile.d/conda.sh
+ conda activate contextsv
+ echo "CONDA_PREFIX=$CONDA_PREFIX"
+ ls -l $CONDA_PREFIX/include/htslib
+ make CONDA_PREFIX=$CONDA_PREFIX
- name: Run unit tests
shell: bash --login {0}
run: |
- mkdir -p tests/output
- python -m pytest -s -v tests/test_general.py
+ source $(conda info --base)/etc/profile.d/conda.sh
+ conda activate contextsv
+ ./build/contextsv --version
+ ./build/contextsv --help
+
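+ # Previous Python-based test step, kept for reference: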
+ # run: |
+ # source $(conda info --base)/etc/profile.d/conda.sh
+ # conda activate contextsv
+ # mkdir -p tests/output
+ # python -m pytest -s -v tests/test_general.py
diff --git a/.gitignore b/.gitignore
index 50575520..b7478d26 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,6 +55,7 @@ CMakeSettings.json
# Output folder
output/
+python/
# Doxygen
docs/html/
@@ -67,12 +68,15 @@ python/dbscan
python/agglo
linktoscripts
tests/data
+tests/cpp_module_out
# Population allele frequency filepaths
data/gnomadv2_filepaths.txt
data/gnomadv3_filepaths.txt
data/gnomadv4_filepaths.txt
+data/gnomadv4_filepaths_ssd.txt
data/gnomadv4_hg19_filepaths.txt
+data/gnomadv4_hg19_filepaths_ssd.txt
# Training data
data/sv_scoring_dataset/
@@ -84,3 +88,17 @@ data/hg19ToHg38.over.chain.gz
# Test images
python/dbscan_clustering*.png
python/dist_plots
+upset_plot*.png
+
+# Temporary files
+lib/.nfs*
+valgrind.log
+
+# Log files
+*.log
+*.err
+*.out
+
+# Snakemake files
+.snakemake
+snakemake_bench/results/
diff --git a/Makefile b/Makefile
index b6f7ddab..207f7e09 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,53 @@
+# Directories
INCL_DIR := $(CURDIR)/include
SRC_DIR := $(CURDIR)/src
+BUILD_DIR := $(CURDIR)/build
LIB_DIR := $(CURDIR)/lib
+# Version header
+VERSION := $(shell git describe --tags --always)
+VERSION_HEADER := $(INCL_DIR)/version.h
+# Regenerate the version header on every build so the binary reports the
+# current "git describe" output
+.PHONY: $(VERSION_HEADER)
+$(VERSION_HEADER):
+	@echo "#pragma once" > $@
+	@echo "#define VERSION \"$(VERSION)\"" >> $@
-all:
- # Generate the SWIG wrapper (C++ -> Python)
- swig -c++ -python -I$(INCL_DIR) -o $(SRC_DIR)/swig_wrapper.cpp -outdir $(LIB_DIR) $(SRC_DIR)/swig_wrapper.i
+# Conda environment directories
+CONDA_PREFIX := $(shell echo $$CONDA_PREFIX)
+CONDA_INCL_DIR := $(CONDA_PREFIX)/include
+CONDA_LIB_DIR := $(CONDA_PREFIX)/lib
- # Compile the SWIG wrapper using setuptools
- python3 setup.py build_ext --build-lib $(LIB_DIR)
+# Compiler and Flags
+CXX := g++
+CXXFLAGS := -std=c++17 -g -I$(INCL_DIR) -I$(CONDA_INCL_DIR) -Wall -Wextra -pedantic
+
+# Linker Flags
+# Ensure that the library paths are set correctly for linking
+LDFLAGS := -L$(LIB_DIR) -L$(CONDA_LIB_DIR) -Wl,-rpath=$(CONDA_LIB_DIR) # Add rpath for shared libraries
+LDLIBS := -lhts # Link with libhts.a or libhts.so
+
+# Sources and Output
+SOURCES := $(filter-out $(SRC_DIR)/swig_wrapper.cpp, $(wildcard $(SRC_DIR)/*.cpp)) # Filter out the SWIG wrapper from the sources
+OBJECTS := $(patsubst $(SRC_DIR)/%.cpp,$(BUILD_DIR)/%.o,$(SOURCES))
+TARGET := $(BUILD_DIR)/contextsv
+
+# Default target
+all: $(TARGET)
+
+# Debug target
+debug: CXXFLAGS += -DDEBUG
+debug: all
+
+# Link the executable
+$(TARGET): $(OBJECTS)
+ @mkdir -p $(BUILD_DIR)
+ $(CXX) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) $(LDLIBS)
+
+# Compile source files (the version header is generated first)
+$(BUILD_DIR)/%.o: $(SRC_DIR)/%.cpp $(VERSION_HEADER)
+	@mkdir -p $(BUILD_DIR)
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
+# Clean the build directory
+clean:
+	rm -rf $(BUILD_DIR)
+
+.PHONY: all debug clean
+
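+# Example invocations (a sketch; environment names and paths are user-specific):
+#   conda activate htsenv && make          # locate htslib via $CONDA_PREFIX
+#   make CONDA_PREFIX=/path/to/conda/env   # or pass the prefix explicitly
+#   make debug                             # same build with -DDEBUG defined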
\ No newline at end of file
diff --git a/README.md b/README.md
index e84f006d..707f6f64 100644
--- a/README.md
+++ b/README.md
@@ -12,33 +12,51 @@ corresponding reference genome (FASTA), a VCF with high-quality SNPs
Class documentation is available at https://wglab.openbioinformatics.org/ContextSV
-## Installation (Linux)
-### Using Anaconda (recommended)
-First, install [Anaconda](https://www.anaconda.com/).
+## Installation
-Next, create a new environment. This installation has been tested with Python 3.11:
-
-```
-conda create -n contextsv python=3.11
-conda activate contextsv
-```
-
-ContextSV can then be installed using the following command:
+### Building from source (for testing/development)
+ContextSV depends on HTSlib, which can be installed using [Anaconda](https://www.anaconda.com/). Create an environment
+containing HTSlib:
```
-conda install -c bioconda -c wglab contextsv=1.0.0
+conda create -n htsenv -c bioconda -c conda-forge htslib
+conda activate htsenv
```
-### Building from source (for testing/development)
-First install [Anaconda](https://www.anaconda.com/). Then follow the instructions below to install LongReadSum and its dependencies:
+Then follow the instructions below to build ContextSV:
```
git clone https://github.com/WGLab/ContextSV
cd ContextSV
-conda env create -f environment.yml
make
```
+ContextSV can then be run:
+```
+./build/contextsv --help
+
+Usage: ./build/contextsv [options]
+Options:
+ -b, --bam Long-read BAM file (required)
+ -r, --ref Reference genome FASTA file (required)
+ -s, --snp SNPs VCF file (required)
+ -o, --outdir Output directory (required)
+ -c, --chr Chromosome
+ -r, --region Region (start-end)
+ -t, --threads Number of threads
+ -h, --hmm HMM file
+ -n, --sample-size Sample size for HMM predictions
+ --min-cnv Minimum CNV length
+ --eps DBSCAN epsilon
+ --min-pts-pct Percentage of mean chr. coverage to use for DBSCAN minimum points
+ -e, --eth ETH file
+ -p, --pfb PFB file
+ --save-cnv Save CNV data
+ --debug Debug mode with verbose logging
+ --version Print version and exit
+ -h, --help Print usage and exit
+```
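+For example, a minimal single-chromosome run might look like the following
+(all input paths are placeholders):
+```
+conda activate htsenv
+./build/contextsv -b HG002.bam -r GRCh38.fa -s snps.vcf.gz -o output/ -c chr21 -t 4
+```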
+
## Downloading gnomAD SNP population frequencies
SNP population allele frequency
information is used for copy number predictions in this tool (see
@@ -53,7 +71,7 @@ Download links for genome VCF files are located here (last updated April 3,
- **gnomAD v2.1.1 (GRCh37)**: https://gnomad.broadinstitute.org/downloads#2
-### Example download
+### Script for downloading gnomAD VCFs
```
download_dir="~/data/gnomad/v4.0.0/"
@@ -78,71 +96,6 @@ X=~/data/gnomad/v4.0.0/gnomad.genomes.v4.0.sites.chrX.vcf.bgz
Y=~/data/gnomad/v4.0.0/gnomad.genomes.v4.0.sites.chrY.vcf.bgz
```
-## Calling structural variants
-### Example full script generating a merged VCF of structural variants
-```
-# Activate the environment
-conda activate contextsv
-
-# Set the input reference genome
-ref_file="~/data/GRCh38.fa"
-
-# Set the input alignment file (e.g. from minimap2)
-long_read_bam="~/data/HG002.GRCh38.bam"
-
-# Set the input SNPs file (e.g. from NanoCaller)
-snps_file="~/data/variant_calls.snps.vcf.gz"
-
-# Set the SNP population frequencies filepath
-pfb_file="~/data/gnomadv4_filepaths.txt"
-
-# Set the output directory
-output_dir=~/data/contextSV_output
-
-# Specify the number of threads (system-specific)
-thread_count=40
-
-# Run SV calling (~3-4 hours for whole-genome, 40 cores)
-python contextsv --threads $thread_count -o $output_dir -lr $long_read_bam --snps $snps_file --reference $ref_file --pfb $pfb_file
-
-# The output VCF filepath is located here:
-output_vcf=$output_dir/sv_calls.vcf
-
-# Merge SVs (~3-4 hours for whole-genome, 40 cores)
-python contextsv --merge $output_vcf
-
-# The final merged VCF filepath is located here:
-merged_vcf=$output_dir/sv_calls.merged.vcf
-```
-
-## Input arguments
-
-```
-python contextsv --help
-
-ContextSV: A tool for integrative structural variant detection.
-
-options:
- -h, --help show this help message and exit
- -lr LONG_READ, --long-read LONG_READ
- path to the long read alignment BAM file
- -g REFERENCE, --reference REFERENCE
- path to the reference genome FASTA file
- -s SNPS, --snps SNPS path to the SNPs VCF file
- --pfb PFB path to the file with SNP population frequency VCF filepaths (see docs for format)
- -o OUTPUT, --output OUTPUT
- path to the output directory
- -r REGION, --region REGION
- region to analyze (e.g. chr1, chr1:1000-2000). If not provided, the entire genome will be analyzed
- -t THREADS, --threads THREADS
- number of threads to use
- --hmm HMM path to the PennCNV HMM file
- --window-size WINDOW_SIZE
- window size for calculating log2 ratios for CNV predictions (default: 10 kb)
- -d, --debug debug mode (verbose logging)
- -v, --version print the version number and exit
-```
-
## Revision history
For release history, please visit [here](https://github.com/WGLab/ContextSV/releases).
diff --git a/__main__.py b/__main__.py
index a888cdbf..3821b8d1 100644
--- a/__main__.py
+++ b/__main__.py
@@ -214,7 +214,6 @@ def main():
# Set input parameters
input_data = contextsv.InputData()
input_data.setVerbose(args.debug)
- input_data.setShortReadBam(args.short_read)
input_data.setLongReadBam(args.long_read)
input_data.setRefGenome(args.reference)
input_data.setSNPFilepath(args.snps)
diff --git a/environment.yml b/environment.yml
index 26f46822..867f41a4 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,21 +1,9 @@
name: contextsv
channels:
- - defaults
- - anaconda
- - conda-forge
- bioconda
+ - conda-forge
dependencies:
- - python
+ - python=3.10
- numpy
- htslib
- - swig
- pytest
- - plotly
-
-# [A] Generate directly from the file:
-# conda env create -f environment.yml -n contextsv
-# [B] Generate after creating a new environment:
-# conda create -n contextsv
-# conda activate contextsv
-# conda env update -f environment.yml --prune # Prune removes unused packages
-
diff --git a/include/ThreadPool.h b/include/ThreadPool.h
new file mode 100644
index 00000000..41832030
--- /dev/null
+++ b/include/ThreadPool.h
@@ -0,0 +1,98 @@
+#ifndef THREAD_POOL_H
+#define THREAD_POOL_H
+
+#include <vector>
+#include <queue>
+#include <memory>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <future>
+#include <functional>
+#include <stdexcept>
+
+class ThreadPool {
+public:
+    ThreadPool(size_t);
+    template<class F, class... Args>
+    auto enqueue(F&& f, Args&&... args)
+        -> std::future<typename std::result_of<F(Args...)>::type>;
+    ~ThreadPool();
+private:
+    // need to keep track of threads so we can join them
+    std::vector< std::thread > workers;
+    // the task queue
+    std::queue< std::function<void()> > tasks;
+
+    // synchronization
+    std::mutex queue_mutex;
+    std::condition_variable condition;
+    bool stop;
+};
+
+// the constructor just launches some amount of workers
+inline ThreadPool::ThreadPool(size_t threads)
+    : stop(false)
+{
+    for(size_t i = 0; i < threads; ++i)
+        workers.emplace_back(
+            [this]
+            {
+                for(;;)
+                {
+                    std::function<void()> task;
+
+                    {
+                        std::unique_lock<std::mutex> lock(this->queue_mutex);
+                        this->condition.wait(lock,
+                            [this]{ return this->stop || !this->tasks.empty(); });
+                        if(this->stop && this->tasks.empty())
+                            return;
+                        task = std::move(this->tasks.front());
+                        this->tasks.pop();
+                    }
+
+                    task();
+                }
+            }
+        );
+}
+
+// add new work item to the pool
+template<class F, class... Args>
+auto ThreadPool::enqueue(F&& f, Args&&... args)
+    -> std::future<typename std::result_of<F(Args...)>::type>
+{
+    using return_type = typename std::result_of<F(Args...)>::type;
+
+    auto task = std::make_shared< std::packaged_task<return_type()> >(
+        std::bind(std::forward<F>(f), std::forward<Args>(args)...)
+    );
+
+    std::future<return_type> res = task->get_future();
+    {
+        std::unique_lock<std::mutex> lock(queue_mutex);
+
+        // don't allow enqueueing after stopping the pool
+        if(stop)
+            throw std::runtime_error("enqueue on stopped ThreadPool");
+
+        tasks.emplace([task](){ (*task)(); });
+    }
+    condition.notify_one();
+    return res;
+}
+
+// the destructor joins all threads
+inline ThreadPool::~ThreadPool()
+{
+    {
+        std::unique_lock<std::mutex> lock(queue_mutex);
+        stop = true;
+    }
+    condition.notify_all();
+    for(std::thread &worker: workers)
+        worker.join();
+}
+
+#endif
diff --git a/include/cnv_caller.h b/include/cnv_caller.h
index c913c24b..afdd78b3 100644
--- a/include/cnv_caller.h
+++ b/include/cnv_caller.h
@@ -6,19 +6,19 @@
#include "khmm.h"
#include "input_data.h"
-#include "cnv_data.h"
-#include "sv_data.h"
#include "sv_types.h"
+#include "sv_object.h"
+#include "utils.h"
/// @cond
 #include <string>
 #include <vector>
 #include <map>
 #include <unordered_map>
-#include <mutex>
+// #include <mutex>
+#include <shared_mutex>
 #include <utility>
-#include "snp_info.h"
/// @endcond
using namespace sv_types;
@@ -26,7 +26,7 @@ using namespace sv_types;
// SNP data is a struct containing vectors used in predicting copy number
// states. It is sorted by SNP position.
struct SNPData {
-    std::vector<int64_t> pos;
+    std::vector<uint32_t> pos;
     std::vector<double> pfb;
     std::vector<double> baf;
     std::vector<double> log2_cov;
@@ -47,105 +47,68 @@ struct SNPData {
// CNVCaller: Detect CNVs and return the state sequence by SNP position
class CNVCaller {
private:
- InputData* input_data;
- mutable std::mutex sv_candidates_mtx; // SV candidate map mutex
- mutable std::mutex snp_data_mtx; // SNP data mutex
- mutable std::mutex hmm_mtx; // HMM mutex
- CHMM hmm;
- SNPData snp_data;
- SNPInfo snp_info;
- double mean_chr_cov = 0.0;
-    std::unordered_map<uint32_t, int> pos_depth_map;
+ std::shared_mutex& shared_mutex;
- // Define a map of CNV genotypes by HMM predicted state.
- // We only use the first 3 genotypes (0/0, 0/1, 1/1) for the VCF output.
- // Each of the 6 state predictions corresponds to a copy number state
- // (0=No predicted state)
- // 0: 1/1 (Normal diploid: no copy number change, GT: 1/1)
- // 1: 0/0 (Two copy loss: homozygous deletion, GT: 0/0)
- // 2: 1/0 (One copy loss: heterozygous deletion, GT: 0/1)
- // 3: 1/1 (Normal diploid: no copy number change, GT: 1/1)
- // 4: 1/1 (Copy neutral LOH: no copy number change, GT: 1/1)
- // 5: 2/1 (One copy gain: heterozygous duplication, GT: 1/2->0/1)
- // 6: 2/2 (Two copy gain: homozygous duplication, GT: 2/2->1/1)
-    std::map<int, std::string> cnv_genotype_map = {
- {0, "1/1"},
- {1, "0/0"},
- {2, "0/1"},
- {3, "1/1"},
- {4, "1/1"},
- {5, "0/1"},
- {6, "1/1"}
- };
+ void updateSNPData(SNPData& snp_data, uint32_t pos, double pfb, double baf, double log2_cov, bool is_snp);
- // Define a map of CNV types by HMM predicted state (0=No predicted state)
-    std::map<int, int> cnv_type_map = {
- {0, sv_types::UNKNOWN},
- {1, sv_types::DEL},
- {2, sv_types::DEL},
- {3, sv_types::UNKNOWN},
- {4, sv_types::UNKNOWN},
- {5, sv_types::DUP},
- {6, sv_types::DUP}
- };
-
- void updateSNPData(SNPData& snp_data, int64_t pos, double pfb, double baf, double log2_cov, bool is_snp);
-
-    std::pair<std::vector<int>, double> runViterbi(CHMM hmm, SNPData &snp_data);
+    void runViterbi(const CHMM& hmm, SNPData& snp_data, std::pair<std::vector<int>, double>& prediction) const;
// Query a region for SNPs and return the SNP data
-    std::pair<SNPData, bool> querySNPRegion(std::string chr, int64_t start_pos, int64_t end_pos, SNPInfo &snp_info, std::unordered_map<uint32_t, int> &pos_depth_map, double mean_chr_cov);
-
- // Run copy number prediction for a chunk of SV candidates from CIGAR strings
-    void runCIGARCopyNumberPredictionChunk(std::string chr, std::map<SVCandidate, SVInfo>& sv_candidates, std::vector<SVCandidate> sv_chunk, SNPInfo& snp_info, CHMM hmm, int window_size, double mean_chr_cov, std::unordered_map<uint32_t, int>& pos_depth_map);
-
-    void updateSVCopyNumber(std::map<SVCandidate, SVInfo>& sv_candidates, SVCandidate key, int sv_type_update, std::string data_type, std::string genotype, double hmm_likelihood);
-
-    void updateDPValue(std::map<SVCandidate, SVInfo>& sv_candidates, SVCandidate key, int dp_value);
+    void querySNPRegion(std::string chr, uint32_t start_pos, uint32_t end_pos, const std::vector<uint32_t>& pos_depth_map, double mean_chr_cov, SNPData& snp_data, const InputData& input_data) const;
// Split a region into chunks for parallel processing
-    std::vector<std::string> splitRegionIntoChunks(std::string chr, int64_t start_pos, int64_t end_pos, int chunk_count);
-
- // Split SV candidates into chunks for parallel processing
-    std::vector<std::vector<SVCandidate>> splitSVCandidatesIntoChunks(std::map<SVCandidate, SVInfo>& sv_candidates, int chunk_count);
-
- // Merge the read depths from a chunk into the main read depth map
-    void mergePosDepthMaps(std::unordered_map<uint32_t, int>& main_map, std::unordered_map<uint32_t, int>& map_update);
+    std::vector<std::string> splitRegionIntoChunks(std::string chr, uint32_t start_pos, uint32_t end_pos, int chunk_count) const;
public:
- CNVCaller(InputData& input_data);
+ CNVCaller(std::shared_mutex& shared_mutex) : shared_mutex(shared_mutex) {}
- // Load file data for a chromosome (SNP positions, BAF values, and PFB values)
- void loadChromosomeData(std::string chr);
+ // Define a map of CNV genotypes by HMM predicted state.
+ // We only use the first 3 genotypes (0/0, 0/1, 1/1) for the VCF output.
+ // Each of the 6 state predictions corresponds to a copy number state
+ // (0=No predicted state)
+ // 0: Unknown (No predicted state)
+ // 1: 1/1 (Two copy loss: homozygous deletion, GT: 1/1 for homozygous variant)
+ // 2: 0/1 (One copy loss: heterozygous deletion, GT: 0/1)
+ // 3: 0/0 (Normal diploid: no copy number change, GT: 0/0 for homozygous reference)
+ // 4: 1/1 (Copy neutral LOH: no copy number change, GT: 1/1 for homozygous variant)
+ // 5: 2/1 (One copy gain: heterozygous duplication, GT: 1/2->0/1)
+ // 6: 2/2 (Two copy gain: homozygous duplication, GT: 2/2->1/1)
+    const std::unordered_map<int, Genotype> StateGenotypeMap = {
+ {0, Genotype::UNKNOWN},
+ {1, Genotype::HOMOZYGOUS_ALT},
+ {2, Genotype::HETEROZYGOUS},
+ {3, Genotype::HOMOZYGOUS_REF},
+ {4, Genotype::HOMOZYGOUS_ALT},
+ {5, Genotype::HETEROZYGOUS},
+ {6, Genotype::HOMOZYGOUS_ALT}
+ };
- // Run copy number prediction for a pair of SV candidates, and add only
- // the SV candidate with the highest likelihood
-    std::tuple<double, int, std::string, bool> runCopyNumberPredictionPair(std::string chr, SVCandidate sv_one, SVCandidate sv_two);
+    // Get the genotype for an HMM copy number state, falling back to
+    // UNKNOWN for states outside the map
+    inline Genotype getGenotypeFromCNState(int cn_state) const {
+        try {
+            return StateGenotypeMap.at(cn_state);
+        } catch (const std::out_of_range&) {
+            printError("ERROR: Invalid CN state: " + std::to_string(cn_state));
+            return Genotype::UNKNOWN;
+        }
+    }
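+    // Usage sketch: state 2 (one copy loss) maps to a heterozygous call:
+    //   Genotype gt = getGenotypeFromCNState(2);  // Genotype::HETEROZYGOUS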
+
+ // Run copy number prediction for a single SV candidate, returning the
+ // likelihood, predicted CNV type, genotype, and whether SNPs were found
+    std::tuple<double, SVType, Genotype, bool> runCopyNumberPrediction(std::string chr, const CHMM& hmm, uint32_t start_pos, uint32_t end_pos, double mean_chr_cov, const std::vector<uint32_t>& pos_depth_map, const InputData& input_data) const;
// Run copy number prediction for SVs meeting the minimum length threshold obtained from CIGAR strings
-    SNPData runCIGARCopyNumberPrediction(std::string chr, std::map<SVCandidate, SVInfo>& sv_candidates, int min_length);
-
-    void updateSVsFromCopyNumberPrediction(SVData& sv_calls, std::vector<std::pair<SVCandidate, std::string>>& sv_list, std::string chr);
-
- // Calculate the mean chromosome coverage
- double calculateMeanChromosomeCoverage(std::string chr);
-
- // Calculate read depths for a region
-    void calculateDepthsForSNPRegion(std::string chr, int64_t start_pos, int64_t end_pos, std::unordered_map<uint32_t, int>& pos_depth_map);
+    void runCIGARCopyNumberPrediction(std::string chr, std::vector<SVCall>& sv_candidates, const CHMM& hmm, double mean_chr_cov, const std::vector<uint32_t>& pos_depth_map, const InputData& input_data) const;
- // Calculate the log2 ratio for a region given the read depths and mean
- // chromosome coverage
-    double calculateLog2Ratio(uint32_t start_pos, uint32_t end_pos, std::unordered_map<uint32_t, int>& pos_depth_map, double mean_chr_cov);
+    void calculateMeanChromosomeCoverage(const std::vector<std::string>& chromosomes, std::unordered_map<std::string, std::vector<uint32_t>>& chr_pos_depth_map, std::unordered_map<std::string, double>& chr_mean_cov_map, const std::string& bam_filepath, int thread_count) const;
- // Read SNP positions and BAF values from the VCF file of SNP calls
- void readSNPAlleleFrequencies(std::string chr, std::string filepath, SNPInfo& snp_info);
+    void readSNPAlleleFrequencies(std::string chr, uint32_t start_pos, uint32_t end_pos, std::vector<uint32_t>& snp_pos, std::unordered_map<uint32_t, double>& snp_baf, std::unordered_map<uint32_t, double>& snp_pfb, const InputData& input_data) const;
- // Read SNP population frequencies from the PFB file and return a vector
- // of population frequencies for each SNP location
- void getSNPPopulationFrequencies(std::string chr, SNPInfo& snp_info);
+ // Save a TSV with B-allele frequencies, log2 ratios, and copy number predictions
+ void saveSVCopyNumberToTSV(SNPData& snp_data, std::string filepath, std::string chr, uint32_t start, uint32_t end, std::string sv_type, double likelihood) const;
- // Save a TSV with B-allele frequencies, log 2 ratios, and copy number predictions
- void saveSVCopyNumberToTSV(SNPData& snp_data, std::string filepath, std::string chr, int64_t start, int64_t end, std::string sv_type, double likelihood);
+ void saveSVCopyNumberToJSON(SNPData& before_sv, SNPData& after_sv, SNPData& snp_data, std::string chr, uint32_t start, uint32_t end, std::string sv_type, double likelihood, const std::string& filepath) const;
};
#endif // CNV_CALLER_H
diff --git a/include/cnv_data.h b/include/cnv_data.h
deleted file mode 100644
index a2ebd403..00000000
--- a/include/cnv_data.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef CNV_DATA_H
-#define CNV_DATA_H
-
-/// @cond
-#include <map>
-#include <string>
-#include <utility>