From 9312b11b78a30eebbca7e85e37a5c8c87a9f0eea Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Mon, 9 Feb 2026 15:19:07 +0100
Subject: [PATCH 1/2] Connect cli from stats to main cli

---
 src/mritk/cli.py                 |  17 ++-
 src/mritk/statistics/__init__.py |   7 +-
 src/mritk/statistics/cli.py      | 210 +++++++++++++++++++++++++++++++
 3 files changed, 226 insertions(+), 8 deletions(-)
 create mode 100644 src/mritk/statistics/cli.py

diff --git a/src/mritk/cli.py b/src/mritk/cli.py
index d926ef3..902fc69 100644
--- a/src/mritk/cli.py
+++ b/src/mritk/cli.py
@@ -6,7 +6,7 @@
 
 from rich_argparse import RichHelpFormatter
 
-from . import download_data, info
+from . import download_data, info, statistics
 
 
 def version_info():
@@ -48,16 +48,25 @@ def setup_parser():
     subparsers = parser.add_subparsers(dest="command")
 
     # Download test data parser
-    download_parser = subparsers.add_parser("download-test-data", help="Download test data")
+    download_parser = subparsers.add_parser(
+        "download-test-data", help="Download test data", formatter_class=parser.formatter_class
+    )
     download_parser.add_argument("outdir", type=Path, help="Output directory to download test data")
 
-    info_parser = subparsers.add_parser("info", help="Display information about a file")
+    info_parser = subparsers.add_parser(
+        "info", help="Display information about a file", formatter_class=parser.formatter_class
+    )
     info_parser.add_argument("file", type=Path, help="File to display information about")
     info_parser.add_argument(
         "--json", action="store_true", help="Output information in JSON format"
     )
 
+    stats_parser = subparsers.add_parser(
+        "stats", help="Compute MRI statistics", formatter_class=parser.formatter_class
+    )
+    statistics.cli.add_arguments(stats_parser)
+
     return parser
 
 
@@ -77,6 +86,8 @@ def dispatch(parser: argparse.ArgumentParser, argv: Optional[Sequence[str]] = No
     elif command == "info":
         file = args.pop("file")
        info.nifty_info(file, json_output=args.pop("json"))
+    elif command == "stats":
+        statistics.cli.dispatch(args)
     else:
         logger.error(f"Unknown command {command}")
         parser.print_help()
diff --git a/src/mritk/statistics/__init__.py b/src/mritk/statistics/__init__.py
index 1ba5c7d..496fa01 100644
--- a/src/mritk/statistics/__init__.py
+++ b/src/mritk/statistics/__init__.py
@@ -4,9 +4,6 @@
 Copyright (C) 2026 Simula Research Laboratory
 """
 
-from . import utils, compute_stats
+from . import utils, compute_stats, cli
 
-__all__ = [
-    "utils",
-    "compute_stats",
-]
+__all__ = ["utils", "compute_stats", "cli"]
diff --git a/src/mritk/statistics/cli.py b/src/mritk/statistics/cli.py
new file mode 100644
index 0000000..9ad6074
--- /dev/null
+++ b/src/mritk/statistics/cli.py
@@ -0,0 +1,210 @@
+import argparse
+import typing
+from pathlib import Path
+import pandas as pd
+
+from ..segmentation.groups import default_segmentation_groups
+from .compute_stats import generate_stats_dataframe
+
+
+def compute_mri_stats(
+    segmentation: Path,
+    mri: list[Path],
+    output: Path,
+    timetable: Path | None = None,
+    timelabel: str | None = None,
+    seg_regex: str | None = None,
+    mri_regex: str | None = None,
+    lut: Path | None = None,
+    info: str | None = None,
+    **kwargs,
+):
+    import sys
+    import json
+    from rich.console import Console
+    from rich.panel import Panel
+
+    # Setup Rich
+    console = Console()
+
+    # Parse info dict from JSON string if provided
+    info_dict = None
+    if info:
+        try:
+            info_dict = json.loads(info)
+        except json.JSONDecodeError:
+            console.print("[bold red]Error:[/bold red] --info must be a valid JSON string.")
+            sys.exit(1)
+
+    if not segmentation.exists():
+        console.print(f"[bold red]Error:[/bold red] Missing segmentation file: {segmentation}")
+        sys.exit(1)
+
+    # Validate all MRI paths before starting
+    for path in mri:
+        if not path.exists():
+            console.print(f"[bold red]Error:[/bold red] Missing MRI file: {path}")
+            sys.exit(1)
+
+    dataframes = []
+
+    # Loop through MRI paths
+    with console.status("[bold green]Processing MRIs...[/bold green]"):
+        for i, path in enumerate(mri):
+            console.print(f"[blue]Processing MRI {i + 1}/{len(mri)}:[/blue] {path.name}")
+
+            try:
+                # Call the logic function
+                df = generate_stats_dataframe(
+                    seg_path=segmentation,
+                    mri_path=path,
+                    timestamp_path=timetable,
+                    timestamp_sequence=timelabel,
+                    seg_pattern=seg_regex,
+                    mri_data_pattern=mri_regex,
+                    lut_path=lut,
+                    info_dict=info_dict,
+                )
+                dataframes.append(df)
+            except Exception as e:
+                console.print(f"[bold red]Failed to process {path.name}:[/bold red] {e}")
+                sys.exit(1)
+
+    if dataframes:
+        final_df = pd.concat(dataframes)
+        final_df.to_csv(output, sep=";", index=False)
+        console.print(
+            Panel(
+                f"Stats successfully saved to:\n[bold green]{output}[/bold green]",
+                title="Success",
+                expand=False,
+            )
+        )
+    else:
+        console.print("[yellow]No dataframes generated.[/yellow]")
+
+
+def get_stats_value(stats_file: Path, region: str, info: str, **kwargs):
+    """
+    Replaces the @click.command('get') decorated function.
+    """
+    import sys
+    from rich.console import Console
+
+    # Setup Rich
+    console = Console()
+
+    # Validate inputs
+    valid_regions = default_segmentation_groups().keys()
+    if region not in valid_regions:
+        console.print(
+            f"[bold red]Error:[/bold red] Region '{region}' "
+            "not found in default segmentation groups."
+        )
+        sys.exit(1)
+
+    valid_infos = [
+        "sum",
+        "mean",
+        "median",
+        "std",
+        "min",
+        "max",
+        "PC1",
+        "PC5",
+        "PC25",
+        "PC75",
+        "PC90",
+        "PC95",
+        "PC99",
+    ]
+    if info not in valid_infos:
+        console.print(
+            f"[bold red]Error:[/bold red] Info '{info}' "
+            f"is invalid. Choose from: {', '.join(valid_infos)}"
+        )
+        sys.exit(1)
+
+    if not stats_file.exists():
+        console.print(f"[bold red]Error:[/bold red] Stats file not found: {stats_file}")
+        sys.exit(1)
+
+    # Process
+    try:
+        df = pd.read_csv(stats_file, sep=";")
+        region_row = df.loc[df["description"] == region]
+
+        if region_row.empty:
+            console.print(f"[red]Region '{region}' not found in the stats file.[/red]")
+            sys.exit(1)
+
+        info_value = region_row[info].values[0]
+
+        # Output
+        console.print(
+            f"[bold cyan]{info}[/bold cyan] for [bold green]{region}[/bold green] "
+            f"= [bold white]{info_value}[/bold white]"
+        )
+        return info_value
+
+    except Exception as e:
+        console.print(f"[bold red]Error reading stats file:[/bold red] {e}")
+        sys.exit(1)
+
+
+def add_arguments(parser: argparse.ArgumentParser):
+    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+    # --- Compute Command ---
+    parser_compute = subparsers.add_parser(
+        "compute", help="Compute MRI statistics", formatter_class=parser.formatter_class
+    )
+    parser_compute.add_argument(
+        "--segmentation", "-s", type=Path, required=True, help="Path to segmentation file"
+    )
+    parser_compute.add_argument(
+        "--mri", "-m", type=Path, nargs="+", required=True, help="Path to MRI data file(s)"
+    )
+    parser_compute.add_argument(
+        "--output", "-o", type=Path, required=True, help="Output CSV file path"
+    )
+    parser_compute.add_argument("--timetable", "-t", type=Path, help="Path to timetable file")
+    parser_compute.add_argument(
+        "--timelabel", "-l", dest="timelabel", type=str, help="Time label sequence"
+    )
+    parser_compute.add_argument(
+        "--seg_regex",
+        "-sr",
+        dest="seg_regex",
+        type=str,
+        help="Regex pattern for segmentation filename",
+    )
+    parser_compute.add_argument(
+        "--mri_regex", "-mr", dest="mri_regex", type=str, help="Regex pattern for MRI filename"
+    )
+    parser_compute.add_argument("--lut", "-lt", dest="lut", type=Path, help="Path to Lookup Table")
+    parser_compute.add_argument("--info", "-i", type=str, help="Info dictionary as JSON string")
+    parser_compute.set_defaults(func=compute_mri_stats)
+
+    # --- Get Command ---
+    parser_get = subparsers.add_parser(
+        "get", help="Get specific stats value", formatter_class=parser.formatter_class
+    )
+    parser_get.add_argument(
+        "--stats_file", "-f", type=Path, required=True, help="Path to stats CSV file"
+    )
+    parser_get.add_argument("--region", "-r", type=str, required=True, help="Region description")
+    parser_get.add_argument(
+        "--info", "-i", type=str, required=True, help="Statistic to retrieve (mean, std, etc.)"
+    )
+    parser_get.set_defaults(func=get_stats_value)
+
+
+def dispatch(args: dict[str, typing.Any]):
+    command = args.pop("command")
+    if command == "compute":
+        compute_mri_stats(**args)
+    elif command == "get":
+        get_stats_value(**args)
+    else:
+        raise ValueError(f"Unknown command: {command}")

From 1ff7a03e42b91864b6fad519a7dfb504c11ded29 Mon Sep 17 00:00:00 2001
From: Henrik Finsberg
Date: Mon, 9 Feb 2026 17:40:07 +0100
Subject: [PATCH 2/2] Update tests and remove os.path

---
 src/mritk/statistics/cli.py           |  44 ++++-----
 src/mritk/statistics/compute_stats.py |  89 +----------------
 test/conftest.py                      |   5 +-
 test/test_cli.py                      |  14 +--
 test/test_mri_io.py                   |   9 +-
 test/test_mri_stats.py                | 134 ++++++++++++++------------
 6 files changed, 113 insertions(+), 182 deletions(-)

diff --git a/src/mritk/statistics/cli.py b/src/mritk/statistics/cli.py
index 9ad6074..b49957b 100644
--- a/src/mritk/statistics/cli.py
+++ b/src/mritk/statistics/cli.py
@@ -49,26 +49,26 @@ def compute_mri_stats(
     dataframes = []
 
     # Loop through MRI paths
-    with console.status("[bold green]Processing MRIs...[/bold green]"):
-        for i, path in enumerate(mri):
-            console.print(f"[blue]Processing MRI {i + 1}/{len(mri)}:[/blue] {path.name}")
-
-            try:
-                # Call the logic function
-                df = generate_stats_dataframe(
-                    seg_path=segmentation,
-                    mri_path=path,
-                    timestamp_path=timetable,
-                    timestamp_sequence=timelabel,
-                    seg_pattern=seg_regex,
-                    mri_data_pattern=mri_regex,
-                    lut_path=lut,
-                    info_dict=info_dict,
-                )
-                dataframes.append(df)
-            except Exception as e:
-                console.print(f"[bold red]Failed to process {path.name}:[/bold red] {e}")
-                sys.exit(1)
+    console.print("[bold green]Processing MRIs...[/bold green]")
+    for i, path in enumerate(mri):
+        # console.print(f"[blue]Processing MRI {i + 1}/{len(mri)}:[/blue] {path.name}")
+
+        try:
+            # Call the logic function
+            df = generate_stats_dataframe(
+                seg_path=segmentation,
+                mri_path=path,
+                timestamp_path=timetable,
+                timestamp_sequence=timelabel,
+                seg_pattern=seg_regex,
+                mri_data_pattern=mri_regex,
+                lut_path=lut,
+                info_dict=info_dict,
+            )
+            dataframes.append(df)
+        except Exception as e:
+            console.print(f"[bold red]Failed to process {path.name}:[/bold red] {e}")
+            sys.exit(1)
 
     if dataframes:
         final_df = pd.concat(dataframes)
@@ -153,7 +153,7 @@ def get_stats_value(stats_file: Path, region: str, info: str, **kwargs):
 
 
 def add_arguments(parser: argparse.ArgumentParser):
-    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+    subparsers = parser.add_subparsers(dest="stats-command", help="Available commands")
 
     # --- Compute Command ---
     parser_compute = subparsers.add_parser(
@@ -201,7 +201,7 @@ def add_arguments(parser: argparse.ArgumentParser):
 
 
 def dispatch(args: dict[str, typing.Any]):
-    command = args.pop("command")
+    command = args.pop("stats-command")
     if command == "compute":
         compute_mri_stats(**args)
     elif command == "get":
diff --git a/src/mritk/statistics/compute_stats.py b/src/mritk/statistics/compute_stats.py
index b38388f..7234d46 100644
--- a/src/mritk/statistics/compute_stats.py
+++ b/src/mritk/statistics/compute_stats.py
@@ -10,8 +10,7 @@
 import re
 import numpy as np
 import pandas as pd
-import tqdm
-import click
+import tqdm.rich
 
 from ..data.io import load_mri_data
 from ..data.orientation import assert_same_space
@@ -20,89 +19,6 @@
 from .utils import voxel_count_to_ml_scale, find_timestamp, prepend_info
 
 
-@click.group()
-def mristats():
-    pass
-
-
-@mristats.command("compute")
-@click.option("--segmentation", "-s", "seg_path", type=Path, required=True)
-@click.option("--mri", "-m", "mri_paths", multiple=True, type=Path, required=True)
-@click.option("--output", "-o", type=Path, required=True)
-@click.option("--timetable", "-t", type=Path)
-@click.option("--timelabel", "-l", "timetable_sequence", type=str)
-@click.option("--seg_regex", "-sr", "seg_pattern", type=str)
-@click.option("--mri_regex", "-mr", "mri_data_pattern", type=str)
-@click.option("--lut", "-lt", "lut_path", type=Path)
-@click.option("--info", "-i", "info_dict", type=dict)
-## FIXME : Need to check that all the given mri in mri_paths
-## are registered to the same baseline MRI - this is done in create_dataframe
-def compute_mri_stats(
-    seg_path: str | Path,
-    mri_paths: tuple[str | Path],
-    output: str | Path,
-    timetable: Optional[str | Path],
-    timetable_sequence: Optional[str | Path],
-    seg_pattern: Optional[str | Path],
-    mri_data_pattern: Optional[str | Path],
-    lut_path: Optional[Path] = None,
-    info_dict: Optional[dict] = None,
-):
-    if not Path(seg_path).exists():
-        raise RuntimeError(f"Missing segmentation: {seg_path}")
-
-    for path in mri_paths:
-        if not Path(path).exists():
-            raise RuntimeError(f"Missing: {path}")
-
-    dataframes = [
-        generate_stats_dataframe(
-            Path(seg_path),
-            Path(path),
-            timetable,
-            timetable_sequence,
-            seg_pattern,
-            mri_data_pattern,
-            lut_path,
-            info_dict,
-        )
-        for path in mri_paths
-    ]
-    pd.concat(dataframes).to_csv(output, sep=";", index=False)
-
-
-# FIXME : This function with one mri_path but should be able to handle dataframe with multiple MRIs
-@mristats.command("get")
-@click.option("--stats_file", "-f", "stats_file", type=Path, required=True)
-@click.option("--region", "-r", "region", type=str)
-@click.option("--info", "-i", "info", type=str)
-def get_stats_value(stats_file: str | Path, region: str, info: str):
-    assert region in default_segmentation_groups().keys()
-    assert info in [
-        "sum",
-        "mean",
-        "median",
-        "std",
-        "min",
-        "max",
-        "PC1",
-        "PC5",
-        "PC25",
-        "PC75",
-        "PC90",
-        "PC95",
-        "PC99",
-    ]
-
-    df = pd.read_csv(stats_file, sep=";")
-
-    region_row = df.loc[df["description"] == region]
-    info_value = region_row[info].values[0]
-    print(f"{info}[{region}] = {info_value}")
-
-    return info_value
-
-
 def generate_stats_dataframe(
     seg_path: Path,
     mri_path: Path,
@@ -178,7 +94,8 @@ def generate_stats_dataframe(
     records = []
     finite_mask = np.isfinite(mri.data)
     volscale = voxel_count_to_ml_scale(seg.affine)
-    for description, labels in tqdm.tqdm(regions.items()):
+
+    for description, labels in tqdm.rich.tqdm(regions.items(), total=len(regions)):
         region_mask = np.isin(seg.data, labels)
         voxelcount = region_mask.sum()
         record = {
diff --git a/test/conftest.py b/test/conftest.py
index 29b9c5e..dbfe221 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,7 +1,8 @@
+from pathlib import Path
 import os
 import pytest
 
 
 @pytest.fixture(scope="session")
-def mri_data_dir():
-    return os.getenv("MRITK_TEST_DATA_FOLDER", "test_data")
+def mri_data_dir() -> Path:
+    return Path(os.getenv("MRITK_TEST_DATA_FOLDER", "test_data"))
diff --git a/test/test_cli.py b/test/test_cli.py
index 20189a9..6c1eb28 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -12,10 +12,11 @@ def test_cli_version(capsys):
 
 def test_cli_info(capsys, mri_data_dir):
     test_file = (
-        mri_data_dir + "/mri-processed/mri_processed_data/sub-01/"
-        "concentrations/sub-01_ses-01_concentration.nii.gz"
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
-    args = ["info", test_file]
+    args = ["info", str(test_file)]
     cli.main(args)
     captured = capsys.readouterr()
     assert "Voxel Size (mm) (0.50, 0.50, 0.50)" in captured.out
@@ -24,10 +25,11 @@
 
 def test_cli_info_json(capsys, mri_data_dir):
     test_file = (
-        mri_data_dir + "/mri-processed/mri_processed_data/sub-01/"
-        "concentrations/sub-01_ses-01_concentration.nii.gz"
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
-    args = ["info", test_file, "--json"]
+    args = ["info", str(test_file), "--json"]
     cli.main(args)
     captured = capsys.readouterr()
     data = json.loads(captured.out)
diff --git a/test/test_mri_io.py b/test/test_mri_io.py
index e66e14c..cf68b42 100644
--- a/test/test_mri_io.py
+++ b/test/test_mri_io.py
@@ -6,16 +6,15 @@
 """
 
 import numpy as np
-import os
-
 from mritk.data.io import load_mri_data, save_mri_data
 
 
 def test_mri_io_nifti(tmp_path, mri_data_dir):
-    input_file = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_dataset/derivatives/sub-01/ses-01/sub-01_ses-01_acq-mixed_T1map.nii.gz",
+    input_file = (
+        mri_data_dir
+        / "mri-processed/mri_dataset/derivatives/sub-01/ses-01/sub-01_ses-01_acq-mixed_T1map.nii.gz"
     )
+
     output_file = tmp_path / "output_nifti.nii.gz"
     mri = load_mri_data(input_file, dtype=np.single, orient=False)
     ## TODO : Test orient=True case
diff --git a/test/test_mri_stats.py b/test/test_mri_stats.py
index 28e2304..638f8b7 100644
--- a/test/test_mri_stats.py
+++ b/test/test_mri_stats.py
@@ -5,21 +5,23 @@
 Copyright (C) 2026 Simula Research Laboratory
 """
 
-import os
-from click.testing import CliRunner
 from pathlib import Path
 
-from mritk.statistics.compute_stats import generate_stats_dataframe, compute_mri_stats
+from mritk.statistics.compute_stats import generate_stats_dataframe  # , compute_mri_stats
+import mritk.cli as cli
 
 
-def test_compute_stats_default(mri_data_dir):
-    seg_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/segmentations/sub-01_seg-aparc+aseg_refined.nii.gz",
+
+def test_compute_stats_default(mri_data_dir: Path):
+    seg_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "segmentations/sub-01_seg-aparc+aseg_refined.nii.gz"
     )
-    mri_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/concentrations/sub-01_ses-01_concentration.nii.gz",
+    mri_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
 
     dataframe = generate_stats_dataframe(seg_path, mri_path)
@@ -52,14 +54,16 @@
     }
 
-def test_compute_stats_patterns(mri_data_dir):
-    seg_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/segmentations/sub-01_seg-aparc+aseg_refined.nii.gz",
+def test_compute_stats_patterns(mri_data_dir: Path):
+    seg_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "segmentations/sub-01_seg-aparc+aseg_refined.nii.gz"
     )
-    mri_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/concentrations/sub-01_ses-01_concentration.nii.gz",
+    mri_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
     seg_pattern = "(?Psub-(control|patient)*\\d{2})_seg-(?P[^\\.]+)"
     mri_data_pattern = (
         "(?Psub-(control|patient)*\\d{2})_(?Pses-\\d{2})_(?P[^\\.]+)"
@@ -80,20 +84,22 @@
     assert dataframe["session"].iloc[0] == "ses-01"
 
 
-def test_compute_stats_timestamp(mri_data_dir):
-    seg_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/segmentations/sub-01_seg-aparc+aseg_refined.nii.gz",
+def test_compute_stats_timestamp(mri_data_dir: Path):
+    seg_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "segmentations/sub-01_seg-aparc+aseg_refined.nii.gz"
     )
-    mri_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/concentrations/sub-01_ses-01_concentration.nii.gz",
+    mri_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
    )
     seg_pattern = "(?Psub-(control|patient)*\\d{2})_seg-(?P[^\\.]+)"
     mri_data_pattern = (
         "(?Psub-(control|patient)*\\d{2})_(?Pses-\\d{2})_(?P[^\\.]+)"
     )
-    timetable = os.path.join(mri_data_dir, "timetable/timetable.tsv")
+    timetable = mri_data_dir / "timetable/timetable.tsv"
     timetable_sequence = "mixed"
 
     dataframe = generate_stats_dataframe(
@@ -108,14 +114,16 @@
     assert dataframe["timestamp"].iloc[0] == -6414.9
 
 
-def test_compute_stats_info(mri_data_dir):
-    seg_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/segmentations/sub-01_seg-aparc+aseg_refined.nii.gz",
+def test_compute_stats_info(mri_data_dir: Path):
+    seg_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "segmentations/sub-01_seg-aparc+aseg_refined.nii.gz"
     )
-    mri_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/concentrations/sub-01_ses-01_concentration.nii.gz",
+    mri_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
     info = {
         "mri_data": "concentration",
@@ -133,40 +141,44 @@
     assert dataframe["session"].iloc[0] == "ses-01"
 
 
-def test_compute_mri_stats_cli(tmp_path, mri_data_dir):
-    runner = CliRunner()
-    seg_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/segmentations/sub-01_seg-aparc+aseg_refined.nii.gz",
+def test_compute_mri_stats_cli(capsys, tmp_path: Path, mri_data_dir: Path):
+    seg_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "segmentations/sub-01_seg-aparc+aseg_refined.nii.gz"
    )
-    mri_path = os.path.join(
-        mri_data_dir,
-        "mri-processed/mri_processed_data/sub-01/concentrations/sub-01_ses-01_concentration.nii.gz",
+    mri_path = (
+        mri_data_dir
+        / "mri-processed/mri_processed_data/sub-01"
+        / "concentrations/sub-01_ses-01_concentration.nii.gz"
     )
     seg_pattern = "(?Psub-(control|patient)*\\d{2})_seg-(?P[^\\.]+)"
     mri_data_pattern = (
         "(?Psub-(control|patient)*\\d{2})_(?Pses-\\d{2})_(?P[^\\.]+)"
     )
-    timetable = os.path.join(mri_data_dir, "timetable/timetable.tsv")
+    timetable = mri_data_dir / "timetable/timetable.tsv"
     timetable_sequence = "mixed"
 
-    result = runner.invoke(
-        compute_mri_stats,
-        [
-            "--segmentation",
-            seg_path,
-            "--mri",
-            mri_path,
-            "--output",
-            Path(str(tmp_path / "mri_stats_output.csv")),
-            "--timetable",
-            timetable,
-            "--timelabel",
-            timetable_sequence,
-            "--seg_regex",
-            seg_pattern,
-            "--mri_regex",
-            mri_data_pattern,
-        ],
-    )
-    assert result.exit_code == 0
+    args = [
+        "--segmentation",
+        str(seg_path),
+        "--mri",
+        str(mri_path),
+        "--output",
+        str(tmp_path / "mri_stats_output.csv"),
+        "--timetable",
+        str(timetable),
+        "--timelabel",
+        timetable_sequence,
+        "--seg_regex",
+        seg_pattern,
+        "--mri_regex",
+        mri_data_pattern,
+    ]
+
+    ret = cli.main(["stats", "compute"] + args)
+    assert ret == 0
+    captured = capsys.readouterr()
+    assert "Processing MRIs..." in captured.out
+    assert "Stats successfully saved to" in captured.out
+    assert (tmp_path / "mri_stats_output.csv").exists()