Skip to content

Commit 4e7551b

Browse files
committed
new papers
1 parent 1569382 commit 4e7551b

File tree

5 files changed

+63
-8
lines changed

5 files changed

+63
-8
lines changed

_bibliography/papers.bib

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,57 @@
33
44
@string{aps = {American Physical Society,}}
55
6+
@article{VanDerValk:2025,
7+
abbr = {},
8+
bibtex_show = {true},
9+
author = {van der Valk, Viktor and Atsma, Douwe and Scherptong, Roderick and Staring, Marius},
10+
title = {Explainable ECG analysis by explicit information disentanglement with VAEs},
11+
journal = {IEEE Transactions on Biomedical Engineering},
12+
volume = {},
13+
number = {},
14+
pages = {},
15+
year = {2025},
16+
pdf = {2025_j_TBME.pdf},
17+
html = {},
18+
arxiv = {},
19+
code = {},
20+
abstract = {},
21+
}
22+
23+
@article{Li:2025,
24+
abbr = {TMI},
25+
bibtex_show = {true},
26+
author = {Li, Tianran and Staring, Marius and Qiao, Yuchuan},
27+
title = {Efficient Large-Deformation Medical Image Registration via Recurrent Dynamic Correlation},
28+
journal = {IEEE Transactions on Medical Imaging},
29+
volume = {},
30+
number = {},
31+
pages = {},
32+
year = {2025},
33+
pdf = {2025_j_TMI.pdf},
34+
html = {https://doi.org/10.1109/TMI.2025.3630584},
35+
arxiv = {https://arxiv.org/abs/2510.22380},
36+
code = {},
37+
abstract = {Deformable image registration estimates voxel-wise correspondences between images through spatial transformations, and plays a key role in medical imaging. While deep learning methods have significantly reduced runtime, efficiently handling large deformations remains a challenging task. Convolutional networks aggregate local features but lack direct modeling of voxel correspondences, prompting recent works to explore explicit feature matching. Among them, voxel-to-region matching is more efficient for direct correspondence modeling by computing local correlation features within neighbourhoods, while region-to-region matching incurs higher redundancy due to excessive correlation pairs across large regions. However, the inherent locality of voxel-to-region matching hinders the capture of long-range correspondences required for large deformations. To address this, we propose a Recurrent Correlation-based framework that dynamically relocates the matching region toward more promising positions. At each step, local matching is performed with low cost, and the estimated offset guides the next search region, supporting efficient convergence toward large deformations. In addition, we use a lightweight recurrent update module with memory capacity and decouple motion-related and texture features to suppress semantic redundancy. We conduct extensive experiments on brain MRI and abdominal CT datasets under two settings: with and without affine pre-registration. Results show our method exhibits a strong accuracy-computation trade-off, surpassing or matching the state-of-the-art performance. For example, it achieves comparable performance on the non-affine OASIS dataset, while using only 9.5% of the FLOPs and running 96% faster than RDP, a representative high-performing method.},
38+
}
39+
40+
@article{Chaves-de-Plaza:2025,
41+
abbr = {TVCG},
42+
bibtex_show = {true},
43+
author = {Chaves-de-Plaza, Nicolas and Raidou, Renata G. and Mody, Prerak P. and Staring, Marius and van Egmond, Ren{\'e} and Vilanova, Anna and Hildebrandt, Klaus},
44+
title = {LoGCC: Local-to-Global Correlation Clustering for Scalar Field Ensembles},
45+
journal = {IEEE Transactions on Visualization and Computer Graphics},
46+
volume = {},
47+
number = {},
48+
pages = {},
49+
year = {2025},
50+
pdf = {2025_j_TVCG.pdf},
51+
html = {https://doi.org/10.1109/TVCG.2025.3630550},
52+
arxiv = {},
53+
code = {},
54+
abstract = {},
55+
}
56+
657
@article{Du2025,
758
abbr = {},
859
bibtex_show = {true},

_bibliography/papers_abstracts.bib

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,12 @@ @inproceedings{VanDerLoo:2025a
77
booktitle = {European Society of Cardiology},
88
month = {August},
99
year = {2025},
10-
volume = {},
11-
pages = {},
12-
pdf = {},
13-
html = {https://test-digital-congress.escardio.org/ESC-Congress/sessions/15898-large-language-models-for-cardiovascular-disease-management},
10+
journal = {European Heart Journal},
11+
volume = {46},
12+
number = {Supplement 1},
13+
pages = {ehaf784.4503},
14+
pdf = {2025_a_EHJa.pdf},
15+
html = {https://doi.org/10.1093/eurheartj/ehaf784.4503},
1416
arxiv = {},
1517
code = {},
1618
abstract = {<b>Introduction:</b> Large volumes of invasive coronary angiography (ICA) reports are stored in electronic health records (EHRs) worldwide, but their free-text format limits research and machine learning applications. (1) Manual annotation remains the primary structuring method, a time-consuming and subjective process. (2) Recent advancements in natural language processing, particularly large language models (LLMs), offer a promising solution. LLMs excel in processing human language, making them valuable for automated medical data annotation. (3)|<b>Purpose:</b> Two LLM-based methods were developed and evaluated for the automated classification of ICA reports, facilitating efficient structuring of unstructured clinical data.<br><b>Methods:</b> ICA reports, written in Dutch, from patients with acute coronary syndrome were retrospectively collected from a local EHR system (2010-2022). A random subset of 1000 reports was manually annotated for occlusion, bypass graft presence, macrovascular coronary artery disease (CAD) (binary), intervention type (PCI, CABG or no intervention), and culprit vessel(s) (main and branch). Annotations were based on the cardiologists' final reports and were reviewed by two researchers, with discrepancies resolved through discussion. The data was randomly split in a training (n=700) and a testing (n=300) set. Two classification approaches were developed: (1) a few-shot prompt engineering (FS) method, using iteratively optimized prompts with a commercially available LLM and (2) a fine-tuning method (FT), trained on multiple state-of-the-art pretrained LLMs. Culprit vessel classification was correct only if predefined labels were exactly matched; partial matches were classified as incorrect. Model performance was assessed using accuracy, macro-averaged F1-scores, and recall. 
Statistical significance differences between models, were evaluated per label using bootstrap resampling and a paired t-test.<br><b>Results:</b> All ICA reports belonged to unique patients. The inter-observer agreement for manual annotation ranged from 81% (culprit branch) to 98% (graft label). The best fine-tuning results were obtained with RoBERTa, pretrained on multilingual datasets. (4) Average accuracy was 89% for the FT method and 88% for the FS method. Accuracy of culprit branch prediction and recall of the no CAD prediction were statistically significantly different between the models (FT outperformed FS in culprit branch, FS outperformed FT for no CAD). Most classification errors occurred in selecting the correct culprit branch(es).<br><b>Conclusion:</b> Both methods approached expert-level accuracy, enabling rapid and standardized classification of ICA reports, significantly reducing the time required for dataset creation in large-scale clinical research. Interestingly, the FT method reached similar or better results compared to the much larger FS model. Meaning that with a relatively small annotated dataset, similar results can be achieved in a much less costly manner.},
@@ -24,10 +26,12 @@ @inproceedings{VanDerLoo:2025b
2426
booktitle = {European Society of Cardiology},
2527
month = {August},
2628
year = {2025},
27-
volume = {},
28-
pages = {},
29-
pdf = {},
30-
html = {https://test-digital-congress.escardio.org/ESC-Congress/sessions/15897-artificial-intelligence-in-cardiac-imaging-transforming-diagnosis-and-treatment-strategies},
29+
journal = {European Heart Journal},
30+
volume = {46},
31+
number = {Supplement 1},
32+
pages = {ehaf784.4498},
33+
pdf = {2025_a_EHJb.pdf},
34+
html = {https://doi.org/10.1093/eurheartj/ehaf784.4498},
3135
arxiv = {},
3236
code = {},
3337
abstract = {<b>Introduction:</b> Large volumes of transthoracic echocardiography (TTE) reports are stored in electronic health records (EHRs) worldwide, but their free-text format limits utility for research and machine learning applications. (1) Manual annotation remains the primary structuring method, a time-consuming and subjective process. (2) Recent advancements in natural language processing, particularly large language models (LLMs), offer a promising solution. LLMs excel in processing human language, making them valuable for automated medical data annotation. (3)<br><b>Purpose:</b> Two LLM-based methods were developed and evaluated for the automated classification of TTE reports, facilitating efficient structuring of unstructured clinical data.<br><b>Methods:</b> TTE reports, written in Dutch, from post-myocardial infarction patients were retrospectively collected from a local EHR system (2010-2024). A random subset of 1000 reports was manually annotated for left ventricular (LV) function and valvular pathology. LV function was categorized as normal, mildly, moderately, severely impaired or unknown. Valve dysfunction was classified by type (none, regurgitation, stenosis, both, or unknown) and severity (none, mild, moderate, severe or unknown) based on cardiologists' final reports. Reports were reviewed by two researchers, with discrepancies resolved through discussion. Data was randomly split into training (n=700) and test (n=300) sets. Two classification approaches were developed: (1) a few-shot prompt engineering method (FS); Iteratively optimized, category-specific prompts with a commercially available LLM, (2) a fine-tuning method (FT ) trained on multiple state-of-the-art pretrained LLMs. Model performance was assessed using accuracy, F1-scores and recall. Statistical significance per label was evaluated using bootstrap resampling and a paired t-test.<br><b>Results:</b> TTE reports included 501 unique patients. 
The dataset exhibited class imbalance, reflecting real-world distributions, with most cases classified as mildly impaired LV function and no/mild valvular dysfunction. Inter-observer agreement was high (98%-100%). The best fine-tuning results were obtained with RoBERTa, pretrained on multilingual datasets. (4) FS outperformed FT in all categories, achieving 99% overall accuracy vs. 93%. All recall and F1-scores were statistically significantly better for the FS method. Most misclassifications involved distinguishing mildly impaired from normal valve function.<br><b>Conclusion:</b> The FS method approached expert-level accuracy, enabling rapid and standardized classification of TTE reports, significantly reducing the time required for dataset creation in large-scale clinical research. The FS method achieved superior classification accuracy over fine-tuning, likely due to model scale and class imbalance, based on the differences in recall and F1-score. Most errors occurred in distinguishing mild impairment from normal valve function, a distinction with limited clinical impact.},

assets/pdf/2025_a_EHJa.pdf

627 KB
Binary file not shown.

assets/pdf/2025_a_EHJb.pdf

656 KB
Binary file not shown.

assets/pdf/2025_j_TMI.pdf

14.2 MB
Binary file not shown.

0 commit comments

Comments
 (0)