Dr. Anneke Meyer
Publications
2023

Bashkanov, O; Rak, M; Meyer, A; Engelage, L; Lumiani, A; Muschter, R; Hansen, C
Automatic detection of prostate cancer grades and chronic prostatitis in biparametric MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 239, pp. 107624, 2023, ISSN: 0169-2607.
@article{bashkanov_automatic_2023,
title = {Automatic detection of prostate cancer grades and chronic prostatitis in biparametric MRI},
author = {O Bashkanov and M Rak and A Meyer and L Engelage and A Lumiani and R Muschter and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0169260723002894},
doi = {10.1016/j.cmpb.2023.107624},
issn = {0169-2607},
year = {2023},
date = {2023-09-01},
urldate = {2023-09-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {239},
pages = {107624},
abstract = {Background and objective: With emerging evidence to improve prostate cancer (PCa) screening, multiparametric magnetic prostate imaging is becoming an essential noninvasive component of the diagnostic routine. Computer-aided diagnostic (CAD) tools powered by deep learning can help radiologists interpret multiple volumetric images. In this work, our objective was to examine promising methods recently proposed in the multigrade prostate cancer detection task and to suggest practical considerations regarding model training in this context. Methods: We collected 1647 fine-grained biopsy-confirmed findings, including Gleason scores and prostatitis, to form a training dataset. In our experimental framework for lesion detection, all models utilized the 3D nnU-Net architecture, which accounts for anisotropy in the MRI data. First, we explore an optimal range of b-values for the diffusion-weighted imaging (DWI) modality and its effect on the detection of clinically significant prostate cancer (csPCa) and prostatitis using deep learning, as the optimal range is not yet clearly defined in this domain. Second, we propose a simulated multimodal shift as a data augmentation technique to compensate for the multimodal shift present in the data. Third, we study the effect of incorporating the prostatitis class alongside cancer-related findings at three different granularities of the prostate cancer class (coarse, medium, and fine) and its impact on the detection rate of the target csPCa. Furthermore, ordinal and one-hot encoded (OHE) output formulations were tested. Results: An optimal model configuration with fine class granularity (prostatitis included) and OHE scored a lesion-wise partial Free-Response Receiver Operating Characteristic (FROC) area under the curve (AUC) of 1.94 (CI 95%: 1.76–2.11) and a patient-wise ROC AUC of 0.874 (CI 95%: 0.793–0.938) in the detection of csPCa. Inclusion of the auxiliary prostatitis class demonstrated a stable relative improvement in specificity at a false positive rate (FPR) of 1.0 per patient, with an increase of 3%, 7%, and 4% for coarse, medium, and fine class granularities. Conclusions: This paper examines several configurations for model training in the biparametric MRI setup and proposes optimal value ranges. It also shows that the fine-grained class configuration, including prostatitis, is beneficial for detecting csPCa. The ability to detect prostatitis in all low-risk cancer lesions suggests the potential to improve the quality of the early diagnosis of prostate diseases. It also implies an improved interpretability of the results by the radiologist.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
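The ordinal and one-hot encoded (OHE) output formulations compared in this paper differ only in how a graded label is turned into a target vector. The following is a minimal sketch of the two encodings, assuming integer grades 0..K-1; the paper's exact class definitions and network head are not reproduced here.

# Sketch: one-hot vs. cumulative ("ordinal") encoding of graded lesion labels.
# Assumes integer grades 0..K-1; the class granularities in the paper differ.
import numpy as np

def one_hot(labels, num_classes):
    """Each grade becomes an independent class: grade 2 of 4 -> [0, 0, 1, 0]."""
    out = np.zeros((len(labels), num_classes))
    out[np.arange(len(labels)), labels] = 1.0
    return out

def ordinal(labels, num_classes):
    """Cumulative encoding: grade 2 of 4 -> [1, 1, 0] ("at least grade k").
    Unlike one-hot, this preserves the ordering of the grades."""
    thresholds = np.arange(1, num_classes)  # grades 1..K-1
    return (np.asarray(labels)[:, None] >= thresholds).astype(float)

grades = [0, 2, 3]
print(one_hot(grades, 4))  # [[1,0,0,0], [0,0,1,0], [0,0,0,1]]
print(ordinal(grades, 4))  # [[0,0,0], [1,1,0], [1,1,1]]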
2022

Gulamhussene, G; Meyer, A; Rak, M; Bashkanov, O; Omari, J; Pech, M; Hansen, C
Predicting 4D liver MRI for MR-guided interventions Journal Article
In: Computerized Medical Imaging and Graphics, vol. 101, pp. 102122, 2022, ISSN: 0895-6111.
@article{gulamhussene_predicting_2022,
title = {Predicting 4D liver MRI for MR-guided interventions},
author = {G Gulamhussene and A Meyer and M Rak and O Bashkanov and J Omari and M Pech and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0895611122000921},
doi = {10.1016/j.compmedimag.2022.102122},
issn = {0895-6111},
year = {2022},
date = {2022-10-01},
urldate = {2022-10-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {101},
pages = {102122},
abstract = {Organ motion poses an unresolved challenge in image-guided interventions like radiation therapy, biopsies or tumor ablation. In the pursuit of solving this problem, the research field of time-resolved volumetric magnetic resonance imaging (4D MRI) has evolved. However, current techniques are unsuitable for most interventional settings because they lack sufficient temporal and/or spatial resolution or have long acquisition times. In this work, we propose a novel approach for real-time, high-resolution 4D MRI with large fields of view for MR-guided interventions. To this end, we propose a network-agnostic, end-to-end trainable, deep learning formulation that enables the prediction of a 4D liver MRI with respiratory states from a live 2D navigator MRI. Our method can be used in two ways: First, it can reconstruct high-quality, near real-time 4D MRI with high resolution (209×128×128 matrix size with isotropic 1.8 mm voxel size and 0.6 s/volume) given a dynamic interventional 2D navigator slice for guidance during an intervention. Second, it can be used for retrospective 4D reconstruction with a temporal resolution below 0.2 s/volume for motion analysis and use in radiation therapy. We report a mean target registration error (TRE) of 1.19 ± 0.74 mm, which is below voxel size. We compare our results with a state-of-the-art retrospective 4D MRI reconstruction; visual evaluation shows comparable quality. We compare different network architectures within our formulation. We show that small training set sizes with acquisition times as short as 2 min can already achieve promising results, and that 24 min are sufficient for high-quality results. Because our method can be readily combined with earlier time-reducing methods, acquisition time can be further decreased while also limiting quality loss. We show that an end-to-end deep learning formulation is highly promising for 4D MRI reconstruction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
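The formulation is network-agnostic: any model that maps a live 2D navigator slice to a 3D volume of the current respiratory state fits into it. Below is a minimal PyTorch sketch of such a mapping; the architecture is a hypothetical stand-in, not one of the networks compared in the paper.

# Sketch: predict a 3D liver volume (one respiratory state) from a 2D navigator.
# Illustrative architecture only; layer sizes and the slice count are assumptions.
import torch
import torch.nn as nn

class Navigator2DTo3D(nn.Module):
    def __init__(self, depth=32):  # number of predicted slices (assumption)
        super().__init__()
        self.encode = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(),
        )
        # Lift 2D features to a volume by predicting all `depth` slices at once.
        self.to_volume = nn.Conv2d(64, depth, 1)

    def forward(self, navigator):    # navigator: (B, 1, H, W)
        feats = self.encode(navigator)
        vol = self.to_volume(feats)  # (B, depth, H, W)
        return vol.unsqueeze(1)      # (B, 1, depth, H, W)

model = Navigator2DTo3D(depth=32)
nav = torch.randn(2, 1, 128, 128)  # batch of live navigator slices
print(model(nav).shape)            # torch.Size([2, 1, 32, 128, 128])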

Meyer, A
Exploiting Supplementary Data and Knowledge for Improved CNN-based Segmentation of Prostate Structures in T2-weighted MRI PhD Thesis
2022.
@phdthesis{meyer_exploiting_2022,
title = {Exploiting Supplementary Data and Knowledge for Improved CNN-based Segmentation of Prostate Structures in T2-weighted MRI},
author = {A Meyer},
url = {https://opendata.uni-halle.de/bitstream/1981185920/91810/1/Meyer_Anneke_Dissertation_2022.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
abstract = {Magnetic resonance imaging (MRI) is gaining increasing importance for the diagnosis and treatment of prostate cancer (PCa). One integral part in the analysis of MRI scans is the segmentation of prostate structures, which are needed for multiple tasks in the clinical assessment of PCa and for the planning and monitoring of therapeutic interventions. Convolutional neural networks (CNNs) have proven to be the top choice for many computer vision tasks, including medical image analysis. Consequently, a large body of research has been carried out on CNN-based segmentation of the prostate whole gland and its subdivision into two anatomical zones: the peripheral zone (PZ) and the transition zone (TZ). Far less research has been conducted on the segmentation of other structures that are relevant in PCa assessment and treatment planning. In this thesis, we set out to close this gap by investigating not only an improved segmentation of the whole gland, but also extending the automatic segmentation to a more detailed division of the interior prostate gland and to adjacent structures that are relevant for reducing the risks of adverse therapy side effects. In this context, we contribute novel methods that leverage supplementary data from different levels of clinical datasets to improve the accuracy and robustness of CNN algorithms for prostate structure segmentation. With our work, we aim to mitigate challenges in their development, with respect to prostate structure segmentation specifically and CNN-based methods in general. These challenges include the quality of the underlying images, the necessity of a large amount of labeled training data, and the performance drop due to domain shift. To overcome the lower image quality in parts of the prostate on axial MRI scan directions, we propose a 3D anisotropic multi-stream CNN. Our method improves the segmentation performance for the prostate by allowing for the incorporation of multiple scan directions. Moreover, we contribute a novel semi-supervised learning algorithm to leverage unlabeled data for improving the segmentation outcomes and reducing the CNN's demand for labeled data. Lastly, we exploit the fact that, although the CNN's performance drops on data from different distributions, its knowledge can be used to improve in the new domain. We introduce a simple yet effective semi-supervised domain adaptation technique that improves the segmentation quality in the new domain with only small amounts of labeled data. With our proposed methods, this thesis takes a further step towards reliable automatic segmentation of prostate structures. Thereby, we do not only focus on the improvement of the CNN algorithms, but we also introduce means to make the methods more applicable in practice.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
2021

Meyer, A; Mehrtash, A; Rak, M; Bashkanov, O; Langbein, B; Ziaei, A; Kibel, A; Tempany, C; Hansen, C; Tokuda, J
Domain adaptation for segmentation of critical structures for prostate cancer therapy Journal Article
In: Scientific Reports, vol. 11, no. 1, pp. 11480, 2021, ISSN: 2045-2322.
@article{meyer_domain_2021,
title = {Domain adaptation for segmentation of critical structures for prostate cancer therapy},
author = {A Meyer and A Mehrtash and M Rak and O Bashkanov and B Langbein and A Ziaei and A Kibel and C Tempany and C Hansen and J Tokuda},
url = {https://www.nature.com/articles/s41598-021-90294-4},
doi = {10.1038/s41598-021-90294-4},
issn = {2045-2322},
year = {2021},
date = {2021-06-01},
urldate = {2021-06-01},
journal = {Scientific Reports},
volume = {11},
number = {1},
pages = {11480},
abstract = {Preoperative assessment of the proximity of critical structures to the tumors is crucial for avoiding unnecessary damage during prostate cancer treatment. A patient-specific 3D anatomical model of those structures, namely the neurovascular bundles (NVB) and the external urethral sphincters (EUS), can enable physicians to perform such assessments intuitively. As a crucial step to generate a patient-specific anatomical model from preoperative MRI in the clinical routine, we propose a multi-class automatic segmentation based on an anisotropic convolutional network. Our specific challenge is to train the network model on a unique source dataset only available at a single clinical site and deploy it to another target site without sharing the original images or labels. As network models trained on data from a single source suffer from quality loss due to the domain shift, we propose a semi-supervised domain adaptation (DA) method to refine the model's performance in the target domain. Our DA method combines transfer learning (TL) and uncertainty-guided self-learning based on deep ensembles. Experiments on the segmentation of the prostate, NVB, and EUS show a significant performance gain with the combination of those techniques compared to pure TL and the combination of TL with simple self-learning (p < 0.005 for all structures using a Wilcoxon signed-rank test). Results on a different task and data (pancreas CT segmentation) demonstrate our method's generic applicability. Our method has the advantage that it does not require any further data from the source domain, unlike the majority of recent domain adaptation strategies. This makes our method suitable for clinical applications, where the sharing of patient data is restricted.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
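The uncertainty-guided self-learning step filters pseudo-labels by the disagreement of a deep ensemble. The following numpy sketch illustrates that idea; the variance threshold is an assumption, and the paper's exact uncertainty measure is not reproduced here.

# Sketch: uncertainty-guided self-learning with a deep ensemble.
# Ensemble disagreement (variance) masks out unreliable pseudo-labels.
import numpy as np

def ensemble_pseudo_labels(member_probs, var_threshold=0.05):
    """member_probs: (M, N) foreground probabilities of M ensemble members
    for N voxels. Returns pseudo-labels and a reliability mask."""
    mean = member_probs.mean(axis=0)
    var = member_probs.var(axis=0)    # disagreement serves as uncertainty
    pseudo = (mean > 0.5).astype(np.uint8)
    reliable = var < var_threshold    # self-train only on confident voxels
    return pseudo, reliable

probs = np.random.rand(5, 1000)       # 5 ensemble members, 1000 voxels
pseudo, mask = ensemble_pseudo_labels(probs)
print(pseudo.shape, mask.mean())      # fraction of voxels kept for self-learning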

Bashkanov, O; Meyer, A; Schindele, D; Schostak, M; Tönnies, K; Hansen, C; Rak, M
Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence Proceedings Article
In: 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1817–1821, 2021, (ISSN: 1945-8452).
@inproceedings{bashkanov_learning_2021,
title = {Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence},
author = {O Bashkanov and A Meyer and D Schindele and M Schostak and K Tönnies and C Hansen and M Rak},
url = {https://ieeexplore.ieee.org/abstract/document/9433848},
doi = {10.1109/ISBI48211.2021.9433848},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)},
pages = {1817–1821},
abstract = {Recent studies demonstrated the eligibility of convolutional neural networks (CNNs) for solving the image registration problem. CNNs enable faster transformation estimation and greater generalization capability, both needed for better support during medical interventions. Conventional fully supervised training requires a lot of high-quality ground truth data, such as voxel-to-voxel transformations, which are typically too tedious and error-prone to attain. In our work, we use weakly supervised learning, which optimizes the model indirectly, only via segmentation masks, which are a more accessible ground truth than deformation fields. Concerning the weak supervision, we investigate two segmentation similarity measures: the multiscale Dice similarity coefficient (mDSC) and the similarity between segmentation-derived signed distance maps (SDMs). We show that the combination of the mDSC and SDM similarity measures results in a more accurate and natural transformation pattern, together with stronger gradient coverage. Furthermore, we introduce an auxiliary input to the neural network carrying prior information about the prostate location in the MR sequence, which is usually available preoperatively. This approach significantly outperforms standard two-input models. With weakly labelled MR-TRUS prostate data, we achieved registration quality comparable to a state-of-the-art deep learning-based method.},
note = {ISSN: 1945-8452},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
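The weak supervision combines a multiscale Dice term with a similarity between segmentation-derived signed distance maps. The following PyTorch sketch shows one way to write such a combined loss; the pooling scales, weighting factor, and the dummy SDM inputs are illustrative assumptions.

# Sketch: weak registration supervision via multiscale Dice (mDSC) plus
# signed-distance-map (SDM) similarity. Scales and weighting are assumptions.
import torch
import torch.nn.functional as F

def dice(a, b, eps=1e-6):
    inter = (a * b).sum()
    return (2 * inter + eps) / (a.sum() + b.sum() + eps)

def multiscale_dice(warped, fixed, scales=(1, 2, 4)):
    """Average Dice loss over several resolutions of the two soft masks."""
    losses = []
    for s in scales:
        a = F.avg_pool3d(warped, s) if s > 1 else warped
        b = F.avg_pool3d(fixed, s) if s > 1 else fixed
        losses.append(1 - dice(a, b))
    return torch.stack(losses).mean()

def sdm_similarity(warped_sdm, fixed_sdm):
    """L1 distance between precomputed signed distance maps of the masks."""
    return (warped_sdm - fixed_sdm).abs().mean()

def weak_registration_loss(warped, fixed, warped_sdm, fixed_sdm, w=0.5):
    return multiscale_dice(warped, fixed) + w * sdm_similarity(warped_sdm, fixed_sdm)

m1 = torch.rand(1, 1, 32, 64, 64)  # warped moving mask (B, C, D, H, W)
m2 = torch.rand(1, 1, 32, 64, 64)  # fixed mask
print(weak_registration_loss(m1, m2, m1 - 0.5, m2 - 0.5))  # dummy SDM inputs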

Meyer, A; Ghosh, S; Schindele, D; Schostak, M; Stober, S; Hansen, C; Rak, M
Uncertainty-Aware Temporal Self-Learning (UATS): Semi-Supervised Learning for Segmentation of Prostate Zones and Beyond Miscellaneous
2021, (arXiv:2104.03840 [eess]).
@misc{meyer_uncertainty-aware_2021,
title = {Uncertainty-Aware Temporal Self-Learning (UATS): Semi-Supervised Learning for Segmentation of Prostate Zones and Beyond},
author = {A Meyer and S Ghosh and D Schindele and M Schostak and S Stober and C Hansen and M Rak},
url = {http://arxiv.org/abs/2104.03840},
doi = {10.48550/arXiv.2104.03840},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
publisher = {arXiv},
abstract = {Various convolutional neural network (CNN) based concepts have been introduced for the automatic segmentation of the prostate and its coarse subdivision into transition zone (TZ) and peripheral zone (PZ). However, when targeting a fine-grained segmentation of TZ, PZ, distal prostatic urethra (DPU) and the anterior fibromuscular stroma (AFS), the task becomes more challenging and has not yet been solved at the level of human performance. One reason might be the insufficient amount of labeled data for supervised training. Therefore, we propose to apply a semi-supervised learning (SSL) technique named uncertainty-aware temporal self-learning (UATS) to overcome the expensive and time-consuming manual ground truth labeling. We combine the SSL techniques temporal ensembling and uncertainty-guided self-learning to benefit from unlabeled images, which are often readily available. Our method significantly outperformed the supervised baseline, obtaining a Dice coefficient (DC) of up to 78.9%, 87.3%, 75.3% and 50.6% for TZ, PZ, DPU and AFS, respectively. The obtained results are in the range of human inter-rater performance for all structures. Moreover, we investigate the method's robustness against noise and demonstrate its generalization capability for varying ratios of labeled data and on other challenging tasks, namely hippocampus and skin lesion segmentation. UATS achieved superior segmentation quality compared to the supervised baseline, particularly for minimal amounts of labeled data.},
note = {arXiv:2104.03840 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
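UATS couples temporal ensembling, an exponential moving average (EMA) over predictions from past epochs, with an uncertainty-guided selection of pseudo-labeled voxels. A minimal numpy sketch follows; the EMA decay and confidence threshold are illustrative assumptions, not the paper's hyperparameters.

# Sketch: temporal ensembling plus uncertainty-guided selection (UATS-style).
import numpy as np

class TemporalEnsemble:
    def __init__(self, num_voxels, alpha=0.6):
        self.alpha = alpha                    # EMA decay (assumption)
        self.ensemble = np.zeros(num_voxels)  # running average of predictions
        self.t = 0

    def update(self, probs):
        self.t += 1
        self.ensemble = self.alpha * self.ensemble + (1 - self.alpha) * probs
        return self.ensemble / (1 - self.alpha ** self.t)  # bias correction

def select_confident(ens_probs, threshold=0.9):
    """Keep voxels whose ensembled prediction is confidently fore- or background."""
    confident = np.maximum(ens_probs, 1 - ens_probs) > threshold
    pseudo = (ens_probs > 0.5).astype(np.uint8)
    return pseudo, confident

ens = TemporalEnsemble(num_voxels=1000)
for epoch in range(3):
    probs = np.random.rand(1000)              # current-epoch predictions
    smoothed = ens.update(probs)
pseudo, mask = select_confident(smoothed)
print(mask.mean())                            # fraction of voxels self-trained on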

Meyer, A; Chlebus, G; Rak, M; Schindele, D; Schostak, M; Ginneken, B; Schenk, A; Meine, H; Hahn, H; Schreiber, A; Hansen, C
Anisotropic 3D Multi-Stream CNN for Accurate Prostate Segmentation from Multi-Planar MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 200, pp. 105821, 2021, ISSN: 01692607, (arXiv:2009.11120 [eess]).
@article{meyer_anisotropic_2021,
title = {Anisotropic 3D Multi-Stream CNN for Accurate Prostate Segmentation from Multi-Planar MRI},
author = {A Meyer and G Chlebus and M Rak and D Schindele and M Schostak and B Ginneken and A Schenk and H Meine and H Hahn and A Schreiber and C Hansen},
url = {http://arxiv.org/abs/2009.11120},
doi = {10.1016/j.cmpb.2020.105821},
issn = {01692607},
year = {2021},
date = {2021-03-01},
urldate = {2021-03-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {200},
pages = {105821},
abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions. Despite the availability of multi-planar MR scans due to standardized protocols, the majority of segmentation approaches presented in the literature consider the axial scans only. Methods: We propose an anisotropic 3D multi-stream CNN architecture, which processes additional scan directions to produce a higher-resolution isotropic prostate segmentation. We investigate two variants of our architecture, which work on two (dual-plane) and three (triple-plane) image orientations, respectively. We compare them with the standard baseline (single-plane) used in the literature, i.e., plain axial segmentation. To realize a fair comparison, we employ a hyperparameter optimization strategy to select optimal configurations for the individual approaches. Results: Training and evaluation on two datasets spanning multiple sites yielded a statistically significant improvement over plain axial segmentation (p < 0.05 on the Dice similarity coefficient). The improvement can be observed especially at the base (0.898 single-plane vs. 0.906 triple-plane) and apex (0.888 single-plane vs. 0.901 dual-plane). Conclusion: This study indicates that models employing two or three scan directions are superior to plain axial segmentation. The knowledge of precise boundaries of the prostate is crucial for the conservation of risk structures. Thus, the proposed models have the potential to improve the outcome of prostate cancer diagnosis and therapies.},
note = {arXiv:2009.11120 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
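The multi-stream idea assigns each scan direction its own encoder and fuses the streams into a single prediction. Below is a minimal dual-plane PyTorch sketch; channel counts and the fusion layer are illustrative, and the anisotropic inputs are assumed to have been resampled to a common grid beforehand.

# Sketch: dual-plane multi-stream segmentation, one encoder per scan direction.
import torch
import torch.nn as nn

def encoder():
    return nn.Sequential(
        nn.Conv3d(1, 16, 3, padding=1), nn.ReLU(),
        nn.Conv3d(16, 32, 3, padding=1), nn.ReLU(),
    )

class MultiStreamSeg(nn.Module):
    def __init__(self):
        super().__init__()
        self.axial = encoder()           # one stream per scan direction
        self.sagittal = encoder()
        self.fuse = nn.Conv3d(64, 1, 1)  # fuse streams into segmentation logits

    def forward(self, ax, sag):
        feats = torch.cat([self.axial(ax), self.sagittal(sag)], dim=1)
        return self.fuse(feats)

net = MultiStreamSeg()
ax = torch.randn(1, 1, 32, 64, 64)   # axial volume on the common grid
sag = torch.randn(1, 1, 32, 64, 64)  # sagittal volume on the common grid
print(net(ax, sag).shape)            # torch.Size([1, 1, 32, 64, 64])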
2020

Schindele, D; Meyer, A; Reibnitz, D Von; Kiesswetter, V; Schostak, M; Rak, M; Hansen, C
High Resolution Prostate Segmentations for the ProstateX-Challenge Miscellaneous
2020.
@misc{schindele_high_2020,
title = {High Resolution Prostate Segmentations for the ProstateX-Challenge},
author = {D Schindele and A Meyer and D Von Reibnitz and V Kiesswetter and M Schostak and M Rak and C Hansen},
url = {https://www.cancerimagingarchive.net/analysis-result/prostatex-seg-hires/},
doi = {10.7937/TCIA.2019.DEG7ZG1U},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
publisher = {The Cancer Imaging Archive},
abstract = {We created 66 high-resolution segmentations for randomly chosen T2-weighted volumes of the ProstateX challenge. The high-resolution segmentations were obtained by considering the three scan directions: for each scan direction (axial, sagittal, coronal), the gland was manually delineated by a medical student, followed by a review and corrections by an expert urologist. These three anisotropic segmentations were fused into one isotropic segmentation by means of shape-based interpolation in the following manner: (1) The signed distance transformation of each of the three segmentations is computed. (2) The anisotropic distance volumes are transformed into an isotropic high-resolution representation with linear interpolation. (3) The fused segmentation is obtained by averaging the distance volumes, smoothing the result, and thresholding at zero. The resulting segmentations were manually verified and, where necessary, corrected further by the expert urologist. Serving as ground truth for training CNNs, these segmentations have the potential to improve the segmentation accuracy of automated algorithms. By considering not only the axial scans but also the sagittal and coronal scan directions, we aimed for a higher fidelity of the segmentations, especially at the apex and base regions of the prostate.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
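The three fusion steps in the abstract map directly onto a few scipy calls. The sketch below assumes binary masks already loaded as numpy arrays and reoriented to a common frame; the zoom factors and smoothing strength are illustrative.

# Sketch: shape-based interpolation fusion of three anisotropic segmentations.
# (1) signed distance transform, (2) isotropic resampling with linear
# interpolation, (3) average, smooth, and threshold at zero.
import numpy as np
from scipy import ndimage

def signed_distance(mask):
    """Negative inside the mask, positive outside."""
    inside = ndimage.distance_transform_edt(mask)
    outside = ndimage.distance_transform_edt(1 - mask)
    return outside - inside

def fuse(masks, zooms, sigma=1.0):
    """masks: binary arrays; zooms: per-mask factors onto the isotropic grid."""
    dists = [ndimage.zoom(signed_distance(m), z, order=1)  # linear interpolation
             for m, z in zip(masks, zooms)]
    avg = np.mean(dists, axis=0)
    return (ndimage.gaussian_filter(avg, sigma) < 0).astype(np.uint8)

axial = np.zeros((20, 64, 64), np.uint8);    axial[5:15, 16:48, 16:48] = 1
sagittal = np.zeros((64, 64, 20), np.uint8); sagittal[16:48, 16:48, 5:15] = 1
coronal = np.zeros((64, 20, 64), np.uint8);  coronal[16:48, 5:15, 16:48] = 1
iso = fuse([axial, sagittal, coronal],
           zooms=[(3.2, 1, 1), (1, 1, 3.2), (1, 3.2, 1)])
print(iso.shape)  # (64, 64, 64) isotropic fused segmentation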

Meyer, A; Schindele, D; Reibnitz, D; Rak, M; Schostak, M; Hansen, C
PROSTATEx Zone Segmentations Miscellaneous
2020.
@misc{meyer_prostatex_2020,
title = {PROSTATEx Zone Segmentations},
author = {A Meyer and D Schindele and D Reibnitz and M Rak and M Schostak and C Hansen},
url = {https://www.cancerimagingarchive.net/analysis-result/prostatex-seg-zones/},
doi = {10.7937/TCIA.NBB4-4655},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
publisher = {The Cancer Imaging Archive},
abstract = {This collection contains zonal segmentations of the prostate for 98 cases randomly selected from the SPIE-AAPM-NCI PROSTATEx Challenge. The four-class segmentation encompasses the peripheral zone, transition zone, fibromuscular stroma and the distal prostatic urethra. As underlying images, we used the transversal T2w scans. The segmentations were created by a medical student with experience in prostate segmentation and an expert urologist who instructed the student and double-checked the segmentations in the end. The DICOM representations of these segmentations were generated with dcmqi.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
2019

Rak, M; Steffen, J; Meyer, A; Hansen, C; Tönnies, K
Combining convolutional neural networks and star convex cuts for fast whole spine vertebra segmentation in MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 177, pp. 47–56, 2019, ISSN: 01692607.
@article{rak_combining_2019,
title = {Combining convolutional neural networks and star convex cuts for fast whole spine vertebra segmentation in MRI},
author = {M Rak and J Steffen and A Meyer and C Hansen and K Tönnies},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0169260718307417},
doi = {10.1016/j.cmpb.2019.05.003},
issn = {01692607},
year = {2019},
date = {2019-08-01},
urldate = {2019-08-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {177},
pages = {47–56},
abstract = {Methods: We address these limitations with a novel graph cut formulation based on vertebra patches extracted along the spine. For each patch, our formulation incorporates appearance and shape information derived from a task-specific convolutional neural network, as well as star-convexity constraints that ensure a topologically correct segmentation of each vertebra. When segmenting vertebrae individually, ambiguities occur due to overlapping segmentations of adjacent vertebrae. We tackle this problem with novel non-overlap constraints between neighboring patches based on so-called encoding swaps. The latter allow us to obtain a globally optimal multi-label segmentation of all vertebrae in polynomial time.
Results: We validated our approach on two data sets. The first contains T1- and T2-weighted whole spine images of 64 subjects with varying health conditions. The second comprises 23 T2-weighted thoracolumbar images of young healthy adults and is publicly available. Our method yielded Dice coefficients of 93.8 ± 2.6% and 96.0 ± 1.0% for the two data sets, with a run time of 1.35 ± 0.08 s and 0.90 ± 0.03 s per vertebra on consumer hardware. A complete whole spine segmentation took 32.4 ± 1.92 s on average.
Conclusions: Our results are superior to those of previous works at a fraction of their run time, which illustrates the efficiency and effectiveness of our whole spine segmentation approach.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
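Star-convexity, used above as a per-vertebra shape constraint, requires that every voxel of a segment can see the segment's center along a straight line that stays inside the segment. A small brute-force numpy sketch of that check follows, in 2D for brevity; the graph-cut encoding of the constraint is not reproduced here.

# Sketch: check star-convexity of a 2D mask with respect to a center point.
import numpy as np

def is_star_convex(mask, center):
    cy, cx = center
    ys, xs = np.nonzero(mask)
    for y, x in zip(ys, xs):
        n = int(max(abs(y - cy), abs(x - cx))) + 1  # samples along the line
        line_y = np.linspace(cy, y, n).round().astype(int)
        line_x = np.linspace(cx, x, n).round().astype(int)
        if not mask[line_y, line_x].all():
            return False  # the line of sight to the center leaves the mask
    return True

disk = np.zeros((21, 21), np.uint8)
yy, xx = np.ogrid[:21, :21]
disk[(yy - 10) ** 2 + (xx - 10) ** 2 <= 64] = 1
print(is_star_convex(disk, (10, 10)))  # True: a disk is star-convex
disk[10, 6] = 0                        # punch a hole on a line of sight
print(is_star_convex(disk, (10, 10)))  # False: (10, 2) no longer sees the center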

Meyer, A; Rak, M; Schindele, D; Blaschke, S; Schostak, M; Fedorov, A; Hansen, C
Towards Patient-Individual PI-RADS v2 Sector Map: CNN for Automatic Segmentation of Prostatic Zones from T2-Weighted MRI Proceedings Article
In: 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), pp. 696–700, IEEE, Venice, Italy, 2019, ISBN: 978-1-5386-3641-1.
@inproceedings{meyer_towards_2019,
title = {Towards Patient-Individual PI-RADS v2 Sector Map: CNN for Automatic Segmentation of Prostatic Zones from T2-Weighted MRI},
author = {A Meyer and M Rak and D Schindele and S Blaschke and M Schostak and A Fedorov and C Hansen},
url = {https://ieeexplore.ieee.org/document/8759572/},
doi = {10.1109/ISBI.2019.8759572},
isbn = {978-1-5386-3641-1},
year = {2019},
date = {2019-04-01},
urldate = {2019-04-01},
booktitle = {2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)},
pages = {696–700},
publisher = {IEEE},
address = {Venice, Italy},
abstract = {Automatic segmentation of the prostate and its inner and surrounding structures is highly desired for various applications. Several works have been presented for the segmentation of the prostate's anatomical zones, but these are limited to the transition and peripheral zones. Following the spatial division according to the PI-RADS v2 sector map, we present a multi-class segmentation method that additionally targets the anterior fibromuscular stroma and the distal prostatic urethra to improve computer-aided detection methods and enable more precise therapy planning. We propose a multi-class segmentation with an anisotropic convolutional neural network that generates a topologically correct division of the prostate into these four structures. We evaluated our method on a dataset of T2-weighted axial MRI scans (n=98 subjects) and obtained results in the range of inter-rater variability for the majority of the zones.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018

Meyer, A; Mehrtash, A; Rak, M; Schindele, D; Schostak, M; Tempany, C; Kapur, T; Abolmaesumi, P; Fedorov, A; Hansen, C
Automatic high resolution segmentation of the prostate from multi-planar MRI Proceedings Article
In: 2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018), pp. 177–181, IEEE, Washington, DC, 2018, ISBN: 978-1-5386-3636-7.
@inproceedings{meyer_automatic_2018,
title = {Automatic high resolution segmentation of the prostate from multi-planar MRI},
author = {A Meyer and A Mehrtash and M Rak and D Schindele and M Schostak and C Tempany and T Kapur and P Abolmaesumi and A Fedorov and C Hansen},
url = {https://ieeexplore.ieee.org/document/8363549/},
doi = {10.1109/ISBI.2018.8363549},
isbn = {978-1-5386-3636-7},
year = {2018},
date = {2018-04-01},
urldate = {2018-04-01},
booktitle = {2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)},
pages = {177–181},
publisher = {IEEE},
address = {Washington, DC},
abstract = {Individualized and accurate segmentations of the prostate are essential for diagnosis as well as therapy planning in prostate cancer (PCa). Most of the previously proposed prostate segmentation approaches rely purely on axial MRI scans, which suffer from low out-of-plane resolution. We propose a method that makes use of sagittal and coronal MRI scans to improve the accuracy of segmentation. These scans are typically acquired as standard of care for PCa staging, but are generally ignored by the segmentation algorithms. Our method is based on a multi-stream 3D convolutional neural network for the automatic extraction of isotropic high-resolution segmentations from MR images. We evaluated segmentation performance on an isotropic high-resolution ground truth (n = 40 subjects). The results show that the use of multi-planar volumes for prostate segmentation leads to improved segmentation results not only for the whole prostate (92.1% Dice similarity coefficient), but also in the apex and base regions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015

Meyer, A; Schnurr, A; Schwalbe, M; Weber, S; Hansen, C
AngioPlan: A Software Assistant to Support the Treatment of Arterio-Venous Malformations Journal Article
2015.
@article{meyer_angioplan_2015,
title = {AngioPlan: A Software Assistant to Support the Treatment of Arterio-Venous Malformations},
author = {A Meyer and A Schnurr and M Schwalbe and S Weber and C Hansen},
url = {https://www.var.ovgu.de/pub/Meyer_2015_CARS.pdf},
year = {2015},
date = {2015-01-01},
keywords = {},
pubstate = {published},
tppubtype = {article}
}