From 105b437f9565e729c033e9be4626ccb1909b7ca2 Mon Sep 17 00:00:00 2001
From: wintermutant
Date: Sat, 14 Feb 2026 18:32:52 -0500
Subject: [PATCH 1/2] api tests and minor workflow refactoring

---
 .github/workflows/test.yml     |  28 +++
 .gitignore                     |   4 +-
 .../workflow_tools/workflow.py | 227 +++++-------------
 pyproject.toml                 |   6 +
 tests/conftest.py              |   7 +
 tests/test_api.py              | 223 +++++++++++++++++
 6 files changed, 325 insertions(+), 170 deletions(-)
 create mode 100644 .github/workflows/test.yml
 create mode 100644 tests/test_api.py

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..b1a122c
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,28 @@
+name: API Tests
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.11"]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: pip install -e ".[test]"
+
+      - name: Run API tests
+        run: pytest tests/test_api.py -v
diff --git a/.gitignore b/.gitignore
index ece0de3..143d193 100755
--- a/.gitignore
+++ b/.gitignore
@@ -47,5 +47,5 @@ share/python-wheels/
 MANIFEST

 # Personal
-**/tests/
-tests/**
+#**/tests/
+#tests/**
diff --git a/bioinformatics_tools/workflow_tools/workflow.py b/bioinformatics_tools/workflow_tools/workflow.py
index f77e130..4791203 100644
--- a/bioinformatics_tools/workflow_tools/workflow.py
+++ b/bioinformatics_tools/workflow_tools/workflow.py
@@ -7,27 +7,17 @@
 from datetime import datetime
 from pathlib import Path

-from bioinformatics_tools.caragols import clix
-from bioinformatics_tools.caragols.condo import CxNode
 from bioinformatics_tools.file_classes.base_classes import command
 from bioinformatics_tools.workflow_tools.bapptainer import (
-    CacheSifError, cache_sif_files, run_apptainer_container)
-from bioinformatics_tools.workflow_tools.models import (ApptainerKey,
-                                                        WorkflowKey)
+    CacheSifError, cache_sif_files)
+from bioinformatics_tools.workflow_tools.models import WorkflowKey
 from bioinformatics_tools.workflow_tools.output_cache import restore_all, store_all
+from bioinformatics_tools.workflow_tools.programs import ProgramBase


 LOGGER = logging.getLogger(__name__)
 WORKFLOW_DIR = Path(__file__).parent

-apptainer_keys: dict[str, ApptainerKey] = {
-    'prodigal': ApptainerKey(
-        executable='apptainer.lima',
-        sif_path='prodigal.sif',
-        commands=[]
-    )
-}
-
 workflow_keys: dict[str, WorkflowKey] = {
     'example': WorkflowKey(
         cmd_identifier='example',
@@ -51,8 +41,8 @@
 }


-class WorkflowBase(clix.App):
-    '''Base class for all workflows. Allows us to have access to config, logging, and reporting
+class WorkflowBase(ProgramBase):
+    '''Snakemake workflow execution. Inherits single-program commands from ProgramBase.
     '''

     def __init__(self, workflow_id=None):
@@ -105,7 +95,7 @@ def build_executable(self, key: WorkflowKey, config_dict: dict = None, mode='not
         # core_command.extend(output_list)
         return core_command

-    def _run_workflow(self, wf_command):
+    def _run_subprocess(self, wf_command):
         '''essentially a wrapper for subprocess.run() to tightly control snakemake execution'''
         LOGGER.debug('Received command and running: %s', wf_command)
         try:
@@ -115,154 +105,83 @@ def _run_workflow(self, wf_command):
             self.failed(f'Critical ERROR during subprocess.run({wf_command}): {e}')
         return 0

-    @command
-    def do_example(self):
-        '''example workflow to execute'''
-
-        LOGGER.info('Config:\n%s', self.conf.show())
+    def _run_pipeline(self, key_name: str, smk_config: dict, cache_map: dict = None, mode='dev'):
+        '''Shared pipeline execution: cache containers, restore outputs, run snakemake, store outputs.'''
+        selected_wf = workflow_keys.get(key_name)
+        if not selected_wf:
+            self.failed(f'No workflow key found for "{key_name}"')
+            return 1

-        # ----------------------- Step 0 - Get the WorkflowKey ----------------------- #
-        if not (selected_wf := workflow_keys.get('example')):
+        # Download / ensure .sif files are cached
+        try:
+            cache_sif_files(selected_wf.sif_files)
+        except CacheSifError as e:
+            LOGGER.critical('Error with cache_sif_files: %s', e)
+            self.failed(f'Error with cache_sif_files: {e}')
             return 1

-        # -------------- Step 1 - Get the input file (eventually files) -------------- #
+        # Restore cached outputs from DB so snakemake skips completed rules
+        db_path = smk_config.get('margie_db')
+        input_file = smk_config.get('input_fasta')
+        if cache_map and db_path and input_file:
+            restored = restore_all(db_path, input_file, cache_map)
+            LOGGER.info('Cache restore results: %s', restored)
+
+        # Build and run snakemake
+        wf_command = self.build_executable(selected_wf, config_dict=smk_config, mode=mode)
+        LOGGER.info('Running snakemake command: %s', ' '.join(wf_command))
+        self._run_subprocess(wf_command)
+
+        # Store new outputs into DB cache
+        if cache_map and db_path and input_file:
+            store_all(db_path, input_file, cache_map)
+
+        self.succeeded(msg=f'Workflow "{key_name}" completed successfully')
+
+    @command
+    def do_example(self):
+        '''example workflow to execute'''

         input_file = self.conf.get('input')
         if not input_file:
             LOGGER.error('No input file specified. Use: dane_wf example input: ')
             self.failed('No input file specified')
             return 1

-        # ------- Step 2 - Get the appropriate output file from the input file ------- #
-        # Derive output filename from input (e.g., file.fasta -> file-output.txt)
-        # Basically we need a way to trace input to final output
         input_path = Path(input_file)
-        output_file = f"{input_path.stem}-output.txt"
-        LOGGER.info('Input file: %s', input_file)
-        LOGGER.info('Output file: %s', output_file)
-
-        # Log which snakemake executable will be used
-        try:
-            which_result = subprocess.run(['which', 'snakemake'], capture_output=True, text=True, check=True)
-            LOGGER.info('Using snakemake from: %s', which_result.stdout.strip())
-        except subprocess.CalledProcessError:
-            LOGGER.warning('Could not find snakemake executable in PATH')
-
-        # --- Step 3 - get program-specific params and send to snakemake as config --- #
         prodigal_config = self.conf.get('prodigal')
-        threads = prodigal_config.get('threads')
-        #TODO: Is there a way to automatically get all config from prodigal or control here
+
         smk_config = {
             'input_fasta': input_file,
-            'output_fasta': output_file,
-            'prodigal_threads': threads
+            'output_fasta': f"{input_path.stem}-output.txt",
+            'prodigal_threads': prodigal_config.get('threads'),
         }

-        # -------- TODO: Step 3.5 - Download / ensure .sif file is downloaded -------- #
-        try:
-            cache_sif_files(selected_wf.sif_files)
-        except CacheSifError as e:
-            LOGGER.critical('Error with cache_sif_files: %s', e)
-            self.failed(f'Error with cache_sif_files: {e}')
-
-        # ----------------------- Step 4 - build the executable ---------------------- #
-        wf_command = self.build_executable(selected_wf, config_dict=smk_config)
-        LOGGER.info('Running snakemake command: %s', wf_command)
-        str_smk = ' '.join(wf_command)
-        LOGGER.info('String snakemake: %s', str_smk)
-        print(f'\n=== SNAKEMAKE COMMAND ===\n{str_smk}\n========================\n')
-
-        # ------ Step 5 Execute the actual workflow (happens within our UV env) ------ #
-        self._run_workflow(wf_command)
-        self.succeeded(msg="All good in the neighborhood (AppleBees TM)")
+        self._run_pipeline('example', smk_config)

-    def get_prg_args(self, config_group):
-        '''find relevant configuration settings to add to container run
-        '''
-        args_list = []
-
-        # Get the config node for this program
-        try:
-            prog_node: CxNode = self.conf.get(config_group)
-        except KeyError:
-            LOGGER.warning('No configuration found for %s', config_group)
-            return args_list
-
-        # If it's not a CxNode (e.g., it's a simple value), return empty
-        if not hasattr(prog_node, 'children'):
-            LOGGER.warning('%s is not a configuration group', config_group)
-            return args_list
-
-        # Iterate over the direct children of this config group
-        for key, value in prog_node.children.items():
-            # Skip nested CxNode objects (only process direct key-value pairs)
-            if not isinstance(value, type(prog_node)):
-                # Convert key to command-line flag format
-                flag = f'--{key}'
-                # Convert value to string
-                str_value = str(value)
-                # Add to args list
-                args_list.extend([flag, str_value])
-
-        LOGGER.debug('Generated args for %s: %s', config_group, args_list)
-        return args_list
-
-    @command
-    def do_prodigal(self):
-        '''run prodigal'''
-        EXECUTABLE = 'prodigal'
-
-        if not (container := apptainer_keys.get('prodigal')):
-            self.failed('No known match for "prodigal"')
-            return
-
-        prg_args = self.get_prg_args(config_group='prodigal')
-        prg_args.insert(0, EXECUTABLE)
-        LOGGER.info('Program arguments: %s', prg_args)
-        exit_code = run_apptainer_container(container, prg_args)
-        self.succeeded(msg='Successfully ran prodigal!')
-
     @command
     def do_margie(self, mode='dev'):
         '''run margie workflow'''
-
-        LOGGER.info('Config:\n%s', self.conf.show())
-
-        # ----------------------- Step 0 - Get the WorkflowKey ----------------------- #
-        if not (selected_wf := workflow_keys.get('margie')):
-            return 1
-
-        # -------------- Step 1 - Get the input file (eventually files) -------------- #
         input_file = self.conf.get('input')
         if not input_file:
-            LOGGER.error('No input file specified. Use: dane_wf example input: ')
+            LOGGER.error('No input file specified. Use: dane_wf margie input: ')
             self.failed('No input file specified')
             return 1

-        # ------- Step 2 - Get the appropriate output file from the input file ------- #
-        # Derive output filename from input (e.g., file.fasta -> file-output.txt)
-        # Basically we need a way to trace input to final output and use the stem as SAMPLE
-        input_path = Path(input_file)
-        out_prodigal = f"prodigal/{input_path.stem}-prodigal.tkn"
-        out_prodigal_faa = f"prodigal/{input_path.stem}-prodigal.faa"
-        out_prodigal_db = f"prodigal/{input_path.stem}-prodigal_db.tkn"
-        out_pfam = f"pfam/{input_path.stem}-pfam.tkn"
-        out_pfam_db = f"pfam/{input_path.stem}-pfam_db.tkn"
+        stem = Path(input_file).stem
+        prodigal_config = self.conf.get('prodigal')
+        margie_db = self.conf.get('margie_db', '/depot/lindems/data/margie/margie.db')
+
+        # Output paths
+        out_prodigal = f"prodigal/{stem}-prodigal.tkn"
+        out_prodigal_faa = f"prodigal/{stem}-prodigal.faa"
+        out_prodigal_db = f"prodigal/{stem}-prodigal_db.tkn"
+        out_pfam = f"pfam/{stem}-pfam.tkn"
+        out_pfam_db = f"pfam/{stem}-pfam_db.tkn"
         out_cog_classify = "cog/cog_classify.tsv"
         out_cog_count = "cog/cog_count.tsv"
-        out_cog_db = f"cog/{input_path.stem}-cog_db.tkn"
-        out_dbcan = f"{input_path.stem}-dbcan.tkn" # Not being used not
-        out_kofam = f"{input_path.stem}-kofam.tkn" # Not being used not
-        LOGGER.info('Input file: %s', input_file)
-        LOGGER.info('out_prodigal file: %s', out_prodigal)
-        LOGGER.info('out_dbcan file: %s', out_dbcan)
+        out_cog_db = f"cog/{stem}-cog_db.tkn"

-        # --- Step 3 - get program-specific params and send to snakemake as config --- #
-        prodigal_config = self.conf.get('prodigal')
-        threads = prodigal_config.get('threads')
-        margie_db = self.conf.get('margie_db', '/depot/lindems/data/margie/margie.db')
-        #TODO: way to automatically get all config from prodigal or do control this here?
-        # TODO: These can probably be default values specified in margie.smk
         smk_config = {
             'input_fasta': input_file,
             'out_prodigal': out_prodigal,
@@ -273,24 +192,12 @@ def do_margie(self, mode='dev'):
             'out_cog_classify': out_cog_classify,
             'out_cog_count': out_cog_count,
             'out_cog_db': out_cog_db,
-            'out_dbcan': out_dbcan,
-            'out_kofam': out_kofam,
-            'prodigal_threads': threads,
+            'out_dbcan': f"{stem}-dbcan.tkn",
+            'out_kofam': f"{stem}-kofam.tkn",
+            'prodigal_threads': prodigal_config.get('threads'),
             'margie_db': margie_db,
         }

-        # ------: Step 3.5 - Download / ensure .sif file is downloaded -------- #
-        # ~/.cache/bioinformatics-tools/prodigal.sif --> multiple for some snakemake pipelines
-        try:
-            cache_sif_files(selected_wf.sif_files)
-        except CacheSifError as e:
-            LOGGER.critical('Error with cache_sif_files: %s', e)
-            self.failed(f'Error with cache_sif_files: {e}')
-
-        # ----------- Step 3.6 - Restore cached outputs from the DB ----------- #
-        # This is super important and revives the working directory with cache
-        # so snakemake can use its default DAG logic from the database. e.g. - places previously generated output
-        # files into the working directory so snakemake knows which rules to skip
         cache_map = {
             'prodigal': [out_prodigal, out_prodigal_faa],
             'prodigal_db': [out_prodigal_db],
@@ -299,21 +206,5 @@ def do_margie(self, mode='dev'):
             'cog': [out_cog_classify, out_cog_count],
             'cog_db': [out_cog_db],
         }
-        restored = restore_all(margie_db, input_file, cache_map)
-        LOGGER.info('Cache restore results: %s', restored)
-
-        # ----------------------- Step 4 - build the executable ---------------------- #
-        wf_command = self.build_executable(selected_wf, config_dict=smk_config, mode=mode)
-
-        LOGGER.info('Running snakemake command: %s', wf_command)
-        str_smk = ' '.join(wf_command)
-        LOGGER.info('\n=== SNAKEMAKE COMMAND ===\n%s\n========================\n', str_smk)
-
-        # ------ Step 5 Execute the actual workflow (happens within our UV env) ------ #
-        self._run_workflow(wf_command)
-
-        # ---- Step 5.5 - Store new outputs into DB cache ---- #
-        store_all(margie_db, input_file, cache_map)
-        # ------------------- Step 6 - Provide output status report ------------------ #
-        self.succeeded(msg="All good in the neighborhood (AppleBees TM)")
+        self._run_pipeline('margie', smk_config, cache_map, mode=mode)

diff --git a/pyproject.toml b/pyproject.toml
index d98639c..e959f61 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,6 +43,12 @@ dependencies = [
     "uvicorn"
 ]

+[project.optional-dependencies]
+test = [
+    "pytest>=7.0",
+    "httpx>=0.23.0",
+]
+
 [project.urls]
 Homepage = "https://github.com/Diet-Microbiome-Interactions-Lab/GeneralTools"

diff --git a/tests/conftest.py b/tests/conftest.py
index ded6a84..dc69413 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -78,6 +78,9 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "cli: marks tests that test CLI functionality"
     )
+    config.addinivalue_line(
+        "markers", "api: marks tests that test API endpoints"
+    )


 # Pytest collection configuration
@@ -87,6 +90,10 @@ def pytest_collection_modifyitems(config, items):
         # Mark CLI tests
         if "test_cli" in item.nodeid or "cli" in item.name.lower():
             item.add_marker(pytest.mark.cli)
+
+        # Mark API tests
+        if "test_api" in item.nodeid:
+            item.add_marker(pytest.mark.api)

         # Mark integration tests (tests that use subprocess or external files)
         if any(keyword in item.nodeid for keyword in ["subprocess", "real_test", "example_"]):
diff --git a/tests/test_api.py b/tests/test_api.py
new file mode 100644
index 0000000..499cda9
--- /dev/null
+++ b/tests/test_api.py
@@ -0,0 +1,223 @@
+"""
+Pytest-based API tests using FastAPI TestClient.
+
+No live server required — TestClient handles requests in-process.
+
+Tiers:
+    1. Health/root endpoints (no mocks)
+    2. JobStore unit tests (in-memory, no mocks)
+    3. SSH endpoints (mocked external calls)
+    4. Path traversal security tests
+"""
+import uuid
+from unittest.mock import patch
+
+import pytest
+from fastapi.testclient import TestClient
+
+from bioinformatics_tools.api.main import app
+from bioinformatics_tools.api.services.job_store import job_store
+
+
+@pytest.fixture()
+def client():
+    """Fresh TestClient; clears job_store between tests."""
+    job_store._jobs.clear()
+    with TestClient(app) as c:
+        yield c
+    job_store._jobs.clear()
+
+
+# ---------------------------------------------------------------------------
+# Tier 1 — Health / root endpoints (no mocks needed)
+# ---------------------------------------------------------------------------
+
+class TestHealthEndpoints:
+    """Smoke tests for every health and info endpoint."""
+
+    def test_root(self, client):
+        resp = client.get("/")
+        assert resp.status_code == 200
+        body = resp.json()
+        assert body["status"] == "success"
+        assert "endpoints" in body
+
+    def test_health(self, client):
+        resp = client.get("/health")
+        assert resp.status_code == 200
+        assert resp.json()["status"] == "success"
+
+    def test_fasta_health(self, client):
+        resp = client.get("/v1/fasta/health")
+        assert resp.status_code == 200
+        assert resp.json()["status"] == "success"
+
+    def test_ssh_health(self, client):
+        resp = client.get("/v1/ssh/health")
+        assert resp.status_code == 200
+        assert resp.json()["status"] == "success"
+
+    def test_files_health(self, client):
+        resp = client.get("/v1/files/health")
+        assert resp.status_code == 200
+        assert resp.json()["status"] == "success"
+
+    def test_files_config(self, client):
+        resp = client.get("/v1/files/config")
+        assert resp.status_code == 200
+
+    def test_files_status(self, client):
+        resp = client.get("/v1/files/status")
+        assert resp.status_code == 200
+        body = resp.json()
+        assert "status" in body
+
+
+# ---------------------------------------------------------------------------
+# Tier 2 — JobStore unit tests (purely in-memory)
+# ---------------------------------------------------------------------------
+
+class TestJobStore:
+    """Direct tests of the JobStore singleton (no HTTP involved)."""
+
+    def setup_method(self):
+        job_store._jobs.clear()
+
+    def teardown_method(self):
+        job_store._jobs.clear()
+
+    def test_create(self):
+        job = job_store.create("j1", "/genomes/ecoli.fasta")
+        assert job["job_id"] == "j1"
+        assert job["status"] == "pending"
+        assert job["genome_path"] == "/genomes/ecoli.fasta"
+        assert job["work_dir"] is None
+        assert "start_time" in job
+
+    def test_get_missing(self):
+        assert job_store.get("nonexistent") is None
+
+    def test_update(self):
+        job_store.create("j2", "/g")
+        job_store.update("j2", status="running", phase="Aligning")
+        job = job_store.get("j2")
+        assert job["status"] == "running"
+        assert job["phase"] == "Aligning"
+
+    def test_append_log(self):
+        job_store.create("j3", "/g")
+        job_store.append_log("j3", "line one")
+        job_store.append_log("j3", "line two")
+        logs = job_store.get("j3")["logs"]
+        assert "line one" in logs
+        assert "line two" in logs
+
+    def test_add_slurm_job(self):
+        job_store.create("j4", "/g")
+        job_store.add_slurm_job("j4", slurm_id="12345", rule="fastp")
+        slurm_jobs = job_store.get_slurm_jobs("j4")
+        assert len(slurm_jobs) == 1
+        assert slurm_jobs[0]["job_id"] == "12345"
+        assert slurm_jobs[0]["rule"] == "fastp"
+        assert slurm_jobs[0]["status"] == "SUBMITTED"
+
+
+class TestJobStatusEndpoint:
+    """HTTP-level tests for job_status that rely on job_store state."""
+
+    def test_job_status_endpoint(self, client):
+        job_store.create("test-job-1", "/genomes/test.fasta")
+        resp = client.get("/v1/ssh/job_status/test-job-1")
+        assert resp.status_code == 200
+        body = resp.json()
+        assert body["job_id"] == "test-job-1"
+        assert body["status"] == "pending"
+
+    def test_job_status_404(self, client):
+        resp = client.get("/v1/ssh/job_status/nonexistent")
+        assert resp.status_code == 404
+
+
+# ---------------------------------------------------------------------------
+# Tier 3 — SSH endpoints with mocks
+# ---------------------------------------------------------------------------
+
+class TestSSHEndpointsMocked:
+    """Endpoints that call external SSH/SLURM services — all mocked."""
+
+    @patch("bioinformatics_tools.api.routers.ssh.job_runner")
+    def test_run_margie(self, mock_runner, client):
+        resp = client.post(
+            "/v1/ssh/run_margie",
+            json={"genome_path": "/depot/genomes/ecoli.fasta"},
+        )
+        assert resp.status_code == 200
+        body = resp.json()
+        assert body["success"] is True
+        assert "job_id" in body
+
+        # Verify job_runner.submit_job was called with the created job_id
+        mock_runner.submit_job.assert_called_once()
+        call_args = mock_runner.submit_job.call_args
+        assert call_args[0][0] == body["job_id"]
+
+    @patch("bioinformatics_tools.api.routers.ssh.ssh_slurm")
+    def test_run_slurm(self, mock_slurm, client):
+        mock_slurm.submit_slurm_job.return_value = "99999"
+        resp = client.post(
+            "/v1/ssh/run_slurm",
+            json={"script": "#!/bin/bash\necho hello"},
+        )
+        assert resp.status_code == 200
+        body = resp.json()
+        assert body["success"] is True
+        assert body["job_id"] == "99999"
+        mock_slurm.submit_slurm_job.assert_called_once_with(
+            script_content="#!/bin/bash\necho hello"
+        )
+
+    @patch("bioinformatics_tools.api.routers.ssh.ssh_slurm")
+    def test_all_genomes(self, mock_slurm, client):
+        mock_slurm.get_genomes.return_value = ["genome1.fasta", "genome2.fasta"]
+        resp = client.get("/v1/ssh/all_genomes", params={"path": "/depot/genomes"})
+        assert resp.status_code == 200
+        body = resp.json()
+        assert body["success"] is True
+        assert len(body["Genomes"]) == 2
+        mock_slurm.get_genomes.assert_called_once_with("/depot/genomes")
+
+
+# ---------------------------------------------------------------------------
+# Tier 4 — Path traversal security tests
+# ---------------------------------------------------------------------------
+
+class TestPathTraversalSecurity:
+    """Ensure path-based endpoints reject directory traversal attempts."""
+
+    def _create_job_with_workdir(self):
+        jid = str(uuid.uuid4())
+        job_store.create(jid, "/genomes/test.fasta")
+        job_store.update(jid, work_dir="/remote/work/dir")
+        return jid
+
+    @patch("bioinformatics_tools.api.routers.ssh.ssh_sftp")
+    def test_job_files_path_traversal(self, mock_sftp, client):
+        jid = self._create_job_with_workdir()
+        resp = client.get(
+            f"/v1/ssh/job_files/{jid}",
+            params={"subdir": "../../etc"},
+        )
+        assert resp.status_code == 400
+        assert "Invalid" in resp.json()["detail"]
+        mock_sftp.list_remote_dir.assert_not_called()
+
+    @patch("bioinformatics_tools.api.routers.ssh.ssh_sftp")
+    def test_download_file_path_traversal(self, mock_sftp, client):
+        jid = self._create_job_with_workdir()
+        resp = client.get(
+            f"/v1/ssh/download_file/{jid}",
+            params={"path": "../../etc/passwd"},
+        )
+        assert resp.status_code == 400
+        assert "Invalid" in resp.json()["detail"]
+        mock_sftp.stream_remote_file.assert_not_called()

From 1be8a61070eda74417767001aa967756db3f4413 Mon Sep 17 00:00:00 2001
From: wintermutant
Date: Sat, 14 Feb 2026 18:46:35 -0500
Subject: [PATCH 2/2] renamed Utilities to utilities

---
 bioinformatics_tools/{Utilities => utilities}/__init__.py       | 0
 bioinformatics_tools/{Utilities => utilities}/btssh.py          | 0
 bioinformatics_tools/{Utilities => utilities}/command_line.py   | 0
 bioinformatics_tools/{Utilities => utilities}/shell_output.py   | 0
 bioinformatics_tools/{Utilities => utilities}/ssh_connection.py | 0
 bioinformatics_tools/{Utilities => utilities}/ssh_sftp.py       | 0
 bioinformatics_tools/{Utilities => utilities}/ssh_slurm.py      | 0
 bioinformatics_tools/{Utilities => utilities}/warnings.py       | 0
 8 files changed, 0 insertions(+), 0 deletions(-)
 rename bioinformatics_tools/{Utilities => utilities}/__init__.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/btssh.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/command_line.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/shell_output.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/ssh_connection.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/ssh_sftp.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/ssh_slurm.py (100%)
 rename bioinformatics_tools/{Utilities => utilities}/warnings.py (100%)

diff --git a/bioinformatics_tools/Utilities/__init__.py b/bioinformatics_tools/utilities/__init__.py
similarity index 100%
rename from bioinformatics_tools/Utilities/__init__.py
rename to bioinformatics_tools/utilities/__init__.py
diff --git a/bioinformatics_tools/Utilities/btssh.py b/bioinformatics_tools/utilities/btssh.py
similarity index 100%
rename from bioinformatics_tools/Utilities/btssh.py
rename to bioinformatics_tools/utilities/btssh.py
diff --git a/bioinformatics_tools/Utilities/command_line.py b/bioinformatics_tools/utilities/command_line.py
similarity index 100%
rename from bioinformatics_tools/Utilities/command_line.py
rename to bioinformatics_tools/utilities/command_line.py
diff --git a/bioinformatics_tools/Utilities/shell_output.py b/bioinformatics_tools/utilities/shell_output.py
similarity index 100%
rename from bioinformatics_tools/Utilities/shell_output.py
rename to bioinformatics_tools/utilities/shell_output.py
diff --git a/bioinformatics_tools/Utilities/ssh_connection.py b/bioinformatics_tools/utilities/ssh_connection.py
similarity index 100%
rename from bioinformatics_tools/Utilities/ssh_connection.py
rename to bioinformatics_tools/utilities/ssh_connection.py
diff --git a/bioinformatics_tools/Utilities/ssh_sftp.py b/bioinformatics_tools/utilities/ssh_sftp.py
similarity index 100%
rename from bioinformatics_tools/Utilities/ssh_sftp.py
rename to bioinformatics_tools/utilities/ssh_sftp.py
diff --git a/bioinformatics_tools/Utilities/ssh_slurm.py b/bioinformatics_tools/utilities/ssh_slurm.py
similarity index 100%
rename from bioinformatics_tools/Utilities/ssh_slurm.py
rename to bioinformatics_tools/utilities/ssh_slurm.py
diff --git a/bioinformatics_tools/Utilities/warnings.py b/bioinformatics_tools/utilities/warnings.py
similarity index 100%
rename from bioinformatics_tools/Utilities/warnings.py
rename to bioinformatics_tools/utilities/warnings.py