Dr. Marko Rak
Publications
2025

Bashkanov, O; Rak, M; Engelage, L; Hansen, C
Augmenting Prostate MRI Dataset with Synthetic Volumetric Images from Zone-Conditioned Diffusion Generative Model Proceedings Article
In: Mukhopadhyay, A; Oksuz, I; Engelhardt, S; Merhof, D; Yuan, Y (Ed.): Deep Generative Models, pp. 160–168, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-72744-3.
@inproceedings{bashkanov_augmenting_2025,
title = {Augmenting Prostate MRI Dataset with Synthetic Volumetric Images from Zone-Conditioned Diffusion Generative Model},
author = {O Bashkanov and M Rak and L Engelage and C Hansen},
editor = {A Mukhopadhyay and I Oksuz and S Engelhardt and D Merhof and Y Yuan},
doi = {10.1007/978-3-031-72744-3_16},
isbn = {978-3-031-72744-3},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Deep Generative Models},
pages = {160–168},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {The need for artificial intelligence (AI)-driven computer-assisted diagnosis (CAD) tools drives up the demand for large high-quality datasets in medical imaging. However, collecting the necessary amount of data is often impractical due to patient privacy concerns or restricted time for medical annotation. Recent advances in generative models in medical imaging with a focus on diffusion-based techniques could provide realistic-looking synthetic samples as a supplement for real data. In this work, we study whether synthetic volumetric MRIs generated by the diffusion model can be used to train downstream models, e.g., semantic segmentation. We can create an arbitrarily large dataset with ground truth by conditioning the diffusion model with a segmentation mask. Thus, the additional synthetic data can be used to control the dataset diversity. Experiments revealed that downstream tasks profit from additional synthetic data. However, the effect will eventually diminish when sufficient real samples are available. We showcase the strength of the synthetic data and provide practical recommendations for using the generated data in zonal prostate segmentation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
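
The entry above investigates how far mask-conditioned synthetic volumes can supplement real training data for zonal segmentation. The following Python sketch only illustrates the augmentation idea of mixing real cases with synthetic ones whose ground truth is the conditioning mask itself; the sampler callable and the mixing ratio are hypothetical placeholders, not the authors' pipeline.

import random

def build_training_set(real_cases, synthetic_masks, sample_from_diffusion, synth_ratio=0.5):
    """Mix real (image, mask) pairs with synthetic pairs generated from masks.

    real_cases:            list of (image, mask) tuples from the real dataset
    synthetic_masks:       segmentation masks used to condition the generator
    sample_from_diffusion: callable mask -> synthetic image (hypothetical interface)
    synth_ratio:           amount of synthetic cases relative to the real set size
    """
    n_synth = int(len(real_cases) * synth_ratio)
    masks = random.sample(list(synthetic_masks), k=min(n_synth, len(synthetic_masks)))
    # Ground truth for synthetic cases is "free": it is the conditioning mask itself.
    synthetic_cases = [(sample_from_diffusion(m), m) for m in masks]
    return real_cases + synthetic_cases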

Bashkanov, O; Engelage, L; Behnel, N; Ehrlich, P; Hansen, C; Rak, M
Multimodal Data Fusion with Irregular PSA Kinetics for Automated Prostate Cancer Grading Journal Article
In: 2025.
@article{bashkanov_multimodal_2025,
title = {Multimodal Data Fusion with Irregular PSA Kinetics for Automated Prostate Cancer Grading},
author = {O Bashkanov and L Engelage and N Behnel and P Ehrlich and C Hansen and M Rak},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2024

Bashkanov, O; Rak, M; Engelage, L; Hansen, C
Automatic Patient-level Diagnosis of Prostate Disease with Fused 3D MRI and Tabular Clinical Data Proceedings Article
In: Medical Imaging with Deep Learning, pp. 1225–1238, PMLR, 2024, (ISSN: 2640-3498).
@inproceedings{bashkanov_automatic_2024,
title = {Automatic Patient-level Diagnosis of Prostate Disease with Fused 3D MRI and Tabular Clinical Data},
author = {O Bashkanov and M Rak and L Engelage and C Hansen},
url = {https://proceedings.mlr.press/v227/bashkanov24a.html},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Medical Imaging with Deep Learning},
pages = {1225–1238},
publisher = {PMLR},
abstract = {Computer-aided diagnosis systems for automatic prostate cancer diagnosis can provide radiologists with decision support during image reading. However, in this case, patient-relevant information often remains unexploited due to the greater focus on the image recognition side, with various imaging devices and modalities, while omitting other potentially valuable clinical data. Therefore, our work investigates the performance of recent methods for the fusion of rich image data and heterogeneous tabular data. Those data may include patient demographics as well as laboratory data, e.g., prostate-specific antigen (PSA). Experiments on the large dataset (3800 subjects) indicated that when using the fusion method with demographic data in clinically significant prostate cancer (csPCa) detection tasks, the mean area under the receiver operating characteristic curve (ROC AUC) has improved significantly from 0.736 to 0.765. We also observed that the naïve concatenation performs similarly or even better than the state-of-the-art fusion modules. We also achieved better prediction quality in grading prostate disease by including more samples from longitudinal PSA profiles in the tabular feature set. Thus, by including the three last PSA samples per patient, the best-performing model has reached AUC of 0.794 and a quadratic weighted kappa score (QWK) of 0.464, which constitutes a significant improvement compared with the image-only method, with ROC AUC of 0.736 and QWK of 0.342.},
note = {ISSN: 2640-3498},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
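
For readers unfamiliar with the "naïve concatenation" baseline mentioned in the abstract above, the snippet below shows a generic late-fusion head that concatenates pooled 3D image features with tabular clinical features (e.g., demographics and PSA values). Layer sizes, feature dimensions and the backbone are illustrative assumptions, not the configuration evaluated in the paper.

import torch
import torch.nn as nn

class LateFusionHead(nn.Module):
    """Naive late fusion: concatenate pooled 3D image features with tabular features."""

    def __init__(self, image_feat_dim=512, tabular_dim=8, num_classes=2):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Linear(image_feat_dim + tabular_dim, 128),
            nn.ReLU(),
            nn.Linear(128, num_classes),
        )

    def forward(self, image_features, tabular):
        # image_features: (B, image_feat_dim), pooled output of a 3D CNN backbone
        # tabular: (B, tabular_dim), e.g. age, prostate volume, recent PSA samples
        fused = torch.cat([image_features, tabular], dim=1)
        return self.classifier(fused)
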
2023

Bashkanov, O; Rak, M; Meyer, A; Engelage, L; Lumiani, A; Muschter, R; Hansen, C
Automatic detection of prostate cancer grades and chronic prostatitis in biparametric MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 239, pp. 107624, 2023, ISSN: 0169-2607.
@article{bashkanov_automatic_2023,
title = {Automatic detection of prostate cancer grades and chronic prostatitis in biparametric MRI},
author = {O Bashkanov and M Rak and A Meyer and L Engelage and A Lumiani and R Muschter and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0169260723002894},
doi = {10.1016/j.cmpb.2023.107624},
issn = {0169-2607},
year = {2023},
date = {2023-09-01},
urldate = {2023-09-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {239},
pages = {107624},
abstract = {Background and objective: With emerging evidence to improve prostate cancer (PCa) screening, multiparametric magnetic prostate imaging is becoming an essential noninvasive component of the diagnostic routine. Computer-aided diagnostic (CAD) tools powered by deep learning can help radiologists interpret multiple volumetric images. In this work, our objective was to examine promising methods recently proposed in the multigrade prostate cancer detection task and to suggest practical considerations regarding model training in this context. Methods: We collected 1647 fine-grained biopsy-confirmed findings, including Gleason scores and prostatitis, to form a training dataset. In our experimental framework for lesion detection, all models utilized 3D nnU-Net architecture that accounts for anisotropy in the MRI data. First, we explore an optimal range of b-values for diffusion-weighted imaging (DWI) modality and its effect on the detection of clinically significant prostate cancer (csPCa) and prostatitis using deep learning, as the optimal range is not yet clearly defined in this domain. Next, we propose a simulated multimodal shift as a data augmentation technique to compensate for the multimodal shift present in the data. Third, we study the effect of incorporating the prostatitis class alongside cancer-related findings at three different granularities of the prostate cancer class (coarse, medium, and fine) and its impact on the detection rate of the target csPCa. Furthermore, ordinal and one-hot encoded (OHE) output formulations were tested. Results: An optimal model configuration with fine class granularity (prostatitis included) and OHE has scored the lesion-wise partial Free-Response Receiver Operating Characteristic (FROC) area under the curve (AUC) of 1.94 (CI 95%: 1.76–2.11) and patient-wise ROC AUC of 0.874 (CI 95%: 0.793–0.938) in the detection of csPCa. Inclusion of the auxiliary prostatitis class has demonstrated a stable relative improvement in specificity at a false positive rate (FPR) of 1.0 per patient, with an increase of 3%, 7%, and 4% for coarse, medium, and fine class granularities. Conclusions: This paper examines several configurations for model training in the biparametric MRI setup and proposes optimal value ranges. It also shows that the fine-grained class configuration, including prostatitis, is beneficial for detecting csPCa. The ability to detect prostatitis in all low-risk cancer lesions suggests the potential to improve the quality of the early diagnosis of prostate diseases. It also implies an improved interpretability of the results by the radiologist.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Gulamhussene, G; Rak, M; Bashkanov, O; Joeres, F; Omari, J; Pech, M; Hansen, C
Transfer-learning is a key ingredient to fast deep learning-based 4D liver MRI reconstruction Journal Article
In: Scientific Reports, vol. 13, no. 1, pp. 11227, 2023, ISSN: 2045-2322, (Publisher: Nature Publishing Group).
@article{gulamhussene_transfer-learning_2023,
title = {Transfer-learning is a key ingredient to fast deep learning-based 4D liver MRI reconstruction},
author = {G Gulamhussene and M Rak and O Bashkanov and F Joeres and J Omari and M Pech and C Hansen},
url = {https://www.nature.com/articles/s41598-023-38073-1},
doi = {10.1038/s41598-023-38073-1},
issn = {2045-2322},
year = {2023},
date = {2023-07-01},
urldate = {2023-07-01},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {11227},
abstract = {Time-resolved volumetric magnetic resonance imaging (4D MRI) could be used to address organ motion in image-guided interventions like tumor ablation. Current 4D reconstruction techniques are unsuitable for most interventional settings because they are limited to specific breathing phases, lack temporal/spatial resolution, and have long prior acquisitions or reconstruction times. Deep learning-based (DL) 4D MRI approaches promise to overcome these shortcomings but are sensitive to domain shift. This work shows that transfer learning (TL) combined with an ensembling strategy can help alleviate this key challenge. We evaluate four approaches: pre-trained models from the source domain, models directly trained from scratch on target domain data, models fine-tuned from a pre-trained model and an ensemble of fine-tuned models. For that, the database was split into 16 source and 4 target domain subjects. Comparing ensemble of fine-tuned models (N = 10) with directly learned models, we report significant improvements (P < 0.001) of the root mean squared error (RMSE) of up to 12% and the mean displacement (MDISP) of up to 17.5%. The smaller the target domain data amount, the larger the effect. This shows that TL + Ens significantly reduces beforehand acquisition time and improves reconstruction quality, rendering it a key component in making 4D MRI clinically feasible for the first time in the context of 4D organ motion models of the liver and beyond.},
note = {Publisher: Nature Publishing Group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
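
The abstract above combines transfer learning with an ensemble of fine-tuned models. The sketch below outlines that generic recipe in PyTorch; ensemble size, learning rate and epochs are placeholder values, and ensemble diversity is assumed to come from random data order and augmentation rather than from the paper's exact protocol.

import copy
import torch

def finetune_ensemble(pretrained_model, target_loader, loss_fn, n_members=10, epochs=5, lr=1e-4):
    """Fine-tune several copies of a source-domain model on sparse target-domain data."""
    members = []
    for _ in range(n_members):
        model = copy.deepcopy(pretrained_model)       # start from the source-domain weights
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        model.train()
        for _ in range(epochs):
            for x, y in target_loader:                # few target-domain samples
                optimizer.zero_grad()
                loss = loss_fn(model(x), y)
                loss.backward()
                optimizer.step()
        members.append(model.eval())
    return members

def ensemble_predict(members, x):
    """Average the predictions of all fine-tuned members."""
    with torch.no_grad():
        return torch.stack([member(x) for member in members]).mean(dim=0)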

Gulamhussene, G; Spiegel, J; Das, A; Rak, M; Hansen, C
Deep Learning-based Marker-less Pose Estimation of Interventional Tools using Surrogate Keypoints Proceedings Article
In: Deserno, T; Handels, H; Maier, A; Maier-Hein, K; Palm, C; Tolxdorff, T (Ed.): Bildverarbeitung für die Medizin 2023, pp. 292–298, Springer Fachmedien, Wiesbaden, 2023, ISBN: 978-3-658-41657-7.
@inproceedings{gulamhussene_deep_2023,
title = {Deep Learning-based Marker-less Pose Estimation of Interventional Tools using Surrogate Keypoints},
author = {G Gulamhussene and J Spiegel and A Das and M Rak and C Hansen},
editor = {T Deserno and H Handels and A Maier and K Maier-Hein and C Palm and T Tolxdorff},
doi = {10.1007/978-3-658-41657-7_63},
isbn = {978-3-658-41657-7},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Bildverarbeitung für die Medizin 2023},
pages = {292–298},
publisher = {Springer Fachmedien},
address = {Wiesbaden},
abstract = {Estimating the position of an intervention needle is an important ability in computer-assisted interventions. Currently, such pose estimations rely either on radiation-intensive CT imaging or need additional optical markers which add overhead to the clinical workflow. We propose a novel deep-learning-based technique for pose estimation of interventional tools which relies on detecting visible features on the tool itself without additional markers. We also propose a novel and fast pipeline for creating vast amounts of robustly labeled and markerless ground truth data for training such neural networks. Initial evaluations suggest that with needle base and needle tip localization errors of about 1 and 4 cm, our approach can yield a search corridor that can be used to find the needle in a low-dose CT image, reducing radiation exposure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Gulamhussene, G; Das, A; Spiegel, J; Punzet, D; Rak, M; Hansen, C
Needle Tip Tracking During CT-guided Interventions using Fuzzy Segmentation Proceedings Article
In: Deserno, T; Handels, H; Maier, A; Maier-Hein, K; Palm, C; Tolxdorff, T (Ed.): Bildverarbeitung für die Medizin 2023, pp. 285–291, Springer Fachmedien, Wiesbaden, 2023, ISBN: 978-3-658-41657-7.
@inproceedings{gulamhussene_needle_2023,
title = {Needle Tip Tracking During CT-guided Interventions using Fuzzy Segmentation},
author = {G Gulamhussene and A Das and J Spiegel and D Punzet and M Rak and C Hansen},
editor = {T Deserno and H Handels and A Maier and K Maier-Hein and C Palm and T Tolxdorff},
doi = {10.1007/978-3-658-41657-7_62},
isbn = {978-3-658-41657-7},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Bildverarbeitung für die Medizin 2023},
pages = {285–291},
publisher = {Springer Fachmedien},
address = {Wiesbaden},
abstract = {CT-guided interventions are standard practice for radiologists to treat lesions in various parts of the human body. In this context, accurate tracking of instruments is of paramount importance for the safety of the procedure and helps radiologists avoid unintended damage to adjacent organs. In this work, a novel method for the estimation of 3D needle tip coordinates in a CT volume using only two 2D projections in an interventional setting is proposed. The method applies a deep learning model for the fuzzy segmentation of the region containing the tip on 2D projections and automatically extracts the position of the tip. A simple UNet achieves a Dice score of 0.9906 for the fuzzy segmentation and an average euclidean distance of 2.96 mm for the needle tip regression task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
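
As a rough illustration of how a tip position can be read out of a fuzzy segmentation and how two 2D projections can be combined into a 3D estimate, consider the sketch below. The center-of-mass readout and the assumption of two axis-aligned orthogonal projections are simplifications for illustration and do not reproduce the acquisition geometry or the network described in the paper.

import numpy as np

def tip_from_heatmap(heatmap):
    """Center of mass of a fuzzy (soft) segmentation of the tip region."""
    heatmap = np.asarray(heatmap, dtype=float)
    total = heatmap.sum()
    grids = np.indices(heatmap.shape)
    return np.array([(g * heatmap).sum() / total for g in grids])

def tip_3d_from_two_projections(heatmap_xy, heatmap_xz):
    """Combine two orthogonal 2D projections into a 3D tip estimate.

    Assumes heatmap_xy is a projection along z (axes: x, y) and heatmap_xz a
    projection along y (axes: x, z); this simplified geometry is an assumption.
    """
    x1, y = tip_from_heatmap(heatmap_xy)
    x2, z = tip_from_heatmap(heatmap_xz)
    return np.array([0.5 * (x1 + x2), y, z])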

Gulamhussene, G; Bashkanov, O; Omari, J; Pech, M; Hansen, C; Rak, M
Using Training Samples as Transitive Information Bridges in Predicted 4D MRI Proceedings Article
In: Xue, Z; Antani, S; Zamzmi, G; Yang, F; Rajaraman, S; Huang, S; Linguraru, M; Liang, Z (Ed.): Medical Image Learning with Limited and Noisy Data, pp. 237–245, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-44917-8.
@inproceedings{gulamhussene_using_2023,
title = {Using Training Samples as Transitive Information Bridges in Predicted 4D MRI},
author = {G Gulamhussene and O Bashkanov and J Omari and M Pech and C Hansen and M Rak},
editor = {Z Xue and S Antani and G Zamzmi and F Yang and S Rajaraman and S Huang and M Linguraru and Z Liang},
doi = {10.1007/978-3-031-44917-8_23},
isbn = {978-3-031-44917-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Medical Image Learning with Limited and Noisy Data},
pages = {237–245},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {The lack of real-time techniques for monitoring respiratory motion impairs the development of guidance systems for image-guided interventions. Recent works show that U-Net based real-time 4D MRI prediction methods are promising, but prone to bad image quality when small training data sets and inputs with multiple MR contrast are used. To overcome this problem, we propose a more efficient use of the sparse training data and re-utilize 2D training samples as a secondary input for construction of transitive information bridges between the navigator slice primary input and the data slice prediction. We thus remove the need for a separate 3D breath-hold MRI with different MR contrast as the secondary input. Results show that our novel construction leads to improved prediction quality with very sparse training data, with a significant decrease in root mean squared error (RMSE) from 0.3 to 0.27 (p < 2.2e-16).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2022

Gulamhussene, G; Meyer, A; Rak, M; Bashkanov, O; Omari, J; Pech, M; Hansen, C
Predicting 4D liver MRI for MR-guided interventions Journal Article
In: Computerized Medical Imaging and Graphics, vol. 101, pp. 102122, 2022, ISSN: 0895-6111.
@article{gulamhussene_predicting_2022,
title = {Predicting 4D liver MRI for MR-guided interventions},
author = {G Gulamhussene and A Meyer and M Rak and O Bashkanov and J Omari and M Pech and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0895611122000921},
doi = {10.1016/j.compmedimag.2022.102122},
issn = {0895-6111},
year = {2022},
date = {2022-10-01},
urldate = {2022-10-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {101},
pages = {102122},
abstract = {Organ motion poses an unresolved challenge in image-guided interventions like radiation therapy, biopsies or tumor ablation. In the pursuit of solving this problem, the research field of time-resolved volumetric magnetic resonance imaging (4D MRI) has evolved. However, current techniques are unsuitable for most interventional settings because they lack sufficient temporal and/or spatial resolution or have long acquisition times. In this work, we propose a novel approach for real-time, high-resolution 4D MRI with large fields of view for MR-guided interventions. To this end, we propose a network-agnostic, end-to-end trainable, deep learning formulation that enables the prediction of a 4D liver MRI with respiratory states from a live 2D navigator MRI. Our method can be used in two ways: First, it can reconstruct high quality fast (near real-time) 4D MRI with high resolution (209×128×128 matrix size with isotropic 1.8mm voxel size and 0.6s/volume) given a dynamic interventional 2D navigator slice for guidance during an intervention. Second, it can be used for retrospective 4D reconstruction with a temporal resolution of below 0.2s/volume for motion analysis and use in radiation therapy. We report a mean target registration error (TRE) of 1.19±0.74mm, which is below voxel size. We compare our results with a state-of-the-art retrospective 4D MRI reconstruction. Visual evaluation shows comparable quality. We compare different network architectures within our formulation. We show that small training sizes with short acquisition times down to 2 min can already achieve promising results and 24 min are sufficient for high quality results. Because our method can be readily combined with earlier time reducing methods, acquisition time can be further decreased while also limiting quality loss. We show that an end-to-end, deep learning formulation is highly promising for 4D MRI reconstruction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2021

Wei, W; Xu, H; Alpers, J; Rak, M; Hansen, C
A deep learning approach for 2D ultrasound and 3D CT/MR image registration in liver tumor ablation Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 206, pp. 106117, 2021, ISSN: 0169-2607.
@article{wei_deep_2021,
title = {A deep learning approach for 2D ultrasound and 3D CT/MR image registration in liver tumor ablation},
author = {W Wei and H Xu and J Alpers and M Rak and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0169260721001929},
doi = {10.1016/j.cmpb.2021.106117},
issn = {0169-2607},
year = {2021},
date = {2021-07-01},
urldate = {2021-07-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {206},
pages = {106117},
abstract = {Background and Objective: Liver tumor ablation is often guided by ultrasound (US). Due to poor image quality, intraoperative US is fused with preoperative computed tomography or magnetic resonance (CT/MR) images to provide visual guidance. As of today, the underlying 2D US to 3D CT/MR registration problem remains a very challenging task. Methods: We propose a novel pipeline to address this registration problem. Contrary to previous work, we do not formulate the problem as a regression task, which - for the given registration problem - achieves a low performance regarding accuracy and robustness due to the limited US soft-tissue contrast and the inter-patient variability of liver vessels. Instead, we first estimate the US probe angle roughly by using a classification network. Given this coarse initialization, we then improve the registration by formulating the problem as a segmentation task, estimating the US plane in the 3D CT/MR through segmentation. Results: We benchmark our approach on 1035 clinical images from 52 patients, yielding average registration errors of 11.6° and 4.7 mm, which outperforms the state-of-the-art SVR method [1]. Conclusion: Our results show the efficiency of the proposed registration pipeline, which has the potential to improve the robustness and accuracy of intraoperative patient registration.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
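
The "plane regression" step mentioned in the abstract above, i.e. turning a segmented target plane in the CT/MR volume into a plane pose, can be illustrated with a generic least-squares plane fit. The SVD-based fit below is a textbook method shown for intuition, not the authors' implementation.

import numpy as np

def fit_plane_to_segmentation(mask, spacing=(1.0, 1.0, 1.0)):
    """Least-squares plane fit to the voxels of a segmented plane region.

    Returns a point on the plane (the centroid) and the unit normal.
    """
    coords = np.argwhere(mask > 0.5).astype(float) * np.asarray(spacing)
    centroid = coords.mean(axis=0)
    # Direction of smallest variance is the plane normal.
    _, _, vt = np.linalg.svd(coords - centroid, full_matrices=False)
    normal = vt[-1]
    return centroid, normal / np.linalg.norm(normal)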

Meyer, A; Mehrtash, A; Rak, M; Bashkanov, O; Langbein, B; Ziaei, A; Kibel, A; Tempany, C; Hansen, C; Tokuda, J
Domain adaptation for segmentation of critical structures for prostate cancer therapy Journal Article
In: Scientific Reports, vol. 11, no. 1, pp. 11480, 2021, ISSN: 2045-2322.
@article{meyer_domain_2021,
title = {Domain adaptation for segmentation of critical structures for prostate cancer therapy},
author = {A Meyer and A Mehrtash and M Rak and O Bashkanov and B Langbein and A Ziaei and A Kibel and C Tempany and C Hansen and J Tokuda},
url = {https://www.nature.com/articles/s41598-021-90294-4},
doi = {10.1038/s41598-021-90294-4},
issn = {2045-2322},
year = {2021},
date = {2021-06-01},
urldate = {2021-06-01},
journal = {Scientific Reports},
volume = {11},
number = {1},
pages = {11480},
abstract = {Preoperative assessment of the proximity of critical structures to the tumors is crucial in avoiding unnecessary damage during prostate cancer treatment. A patient-specific 3D anatomical model of those structures, namely the neurovascular bundles (NVB) and the external urethral sphincters (EUS), can enable physicians to perform such assessments intuitively. As a crucial step to generate a patient-specific anatomical model from preoperative MRI in a clinical routine, we propose a multi-class automatic segmentation based on an anisotropic convolutional network. Our specific challenge is to train the network model on a unique source dataset only available at a single clinical site and deploy it to another target site without sharing the original images or labels. As network models trained on data from a single source suffer from quality loss due to the domain shift, we propose a semi-supervised domain adaptation (DA) method to refine the model’s performance in the target domain. Our DA method combines transfer learning and uncertainty guided self-learning based on deep ensembles. Experiments on the segmentation of the prostate, NVB, and EUS, show significant performance gain with the combination of those techniques compared to pure TL and the combination of TL with simple self-learning (p < 0.005 for all structures using a Wilcoxon’s signed-rank test). Results on a different task and data (Pancreas CT segmentation) demonstrate our method’s generic application capabilities. Our method has the advantage that it does not require any further data from the source domain, unlike the majority of recent domain adaptation strategies. This makes our method suitable for clinical applications, where the sharing of patient data is restricted.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
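
To make the uncertainty-guided self-learning idea above concrete, the sketch below derives pseudo-labels from a deep ensemble and masks out voxels whose predictive variance is too high. The variance criterion and the threshold are illustrative choices; the paper's exact uncertainty measure may differ.

import numpy as np

def pseudo_labels_with_uncertainty(member_probs, threshold=0.1):
    """Ensemble-based pseudo-labelling for uncertainty-guided self-learning.

    member_probs: array of shape (M, C, ...) holding softmax outputs of M ensemble
    members for one target-domain image. Voxels whose predictive variance exceeds
    `threshold` are marked unreliable and can be ignored in the self-learning loss.
    """
    member_probs = np.asarray(member_probs)
    mean_prob = member_probs.mean(axis=0)                  # (C, ...)
    variance = member_probs.var(axis=0).mean(axis=0)       # (...), averaged over classes
    pseudo_label = mean_prob.argmax(axis=0)                # (...)
    reliable = variance < threshold
    return pseudo_label, reliable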

Bashkanov, O; Meyer, A; Schindele, D; Schostak, M; Tönnies, K; Hansen, C; Rak, M
Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence Proceedings Article
In: 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1817–1821, 2021, (ISSN: 1945-8452).
@inproceedings{bashkanov_learning_2021,
title = {Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence},
author = {O Bashkanov and A Meyer and D Schindele and M Schostak and K Tönnies and C Hansen and M Rak},
url = {https://ieeexplore.ieee.org/abstract/document/9433848},
doi = {10.1109/ISBI48211.2021.9433848},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)},
pages = {1817–1821},
abstract = {Recent studies demonstrated the eligibility of convolutional neural networks (CNNs) for solving the image registration problem. CNNs enable faster transformation estimation and greater generalization capability needed for better support during medical interventions. Conventional fully-supervised training requires a lot of high-quality ground truth data such as voxel-to-voxel transformations, which typically are attained in a too tedious and error-prone manner. In our work, we use weakly-supervised learning, which optimizes the model indirectly only via segmentation masks that are a more accessible ground truth than the deformation fields. Concerning the weak supervision, we investigate two segmentation similarity measures: multiscale Dice similarity coefficient (mDSC) and the similarity between segmentation-derived signed distance maps (SDMs). We show that the combination of mDSC and SDM similarity measures results in a more accurate and natural transformation pattern together with a stronger gradient coverage. Furthermore, we introduce an auxiliary input to the neural network for the prior information about the prostate location in the MR sequence, which mostly is available preoperatively. This approach significantly outperforms the standard two-input models. With weakly labelled MR-TRUS prostate data, we showed registration quality comparable to the state-of-the-art deep learning-based method.},
note = {ISSN: 1945-8452},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
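
A single-scale stand-in for the weak supervision described above (Dice on warped segmentation masks plus similarity of segmentation-derived signed distance maps) could look as follows. The weighting factor and the single-scale simplification are assumptions; the paper uses a multiscale Dice term.

import numpy as np
from scipy.ndimage import distance_transform_edt

def signed_distance_map(mask):
    """Signed Euclidean distance: negative inside the structure, positive outside."""
    mask = np.asarray(mask, dtype=bool)
    return distance_transform_edt(~mask) - distance_transform_edt(mask)

def weak_registration_loss(warped_mask, fixed_mask, alpha=1.0, eps=1e-6):
    """Dice term on the warped/fixed masks plus an SDM similarity term.

    warped_mask: moving segmentation after applying the predicted transformation
    fixed_mask:  segmentation of the fixed image
    """
    intersection = (warped_mask * fixed_mask).sum()
    dice = (2.0 * intersection + eps) / (warped_mask.sum() + fixed_mask.sum() + eps)
    sdm_term = np.mean((signed_distance_map(warped_mask > 0.5)
                        - signed_distance_map(fixed_mask > 0.5)) ** 2)
    return (1.0 - dice) + alpha * sdm_term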

Meyer, A; Ghosh, S; Schindele, D; Schostak, M; Stober, S; Hansen, C; Rak, M
Uncertainty-Aware Temporal Self-Learning (UATS): Semi-Supervised Learning for Segmentation of Prostate Zones and Beyond Miscellaneous
2021, (arXiv:2104.03840 [eess]).
@misc{meyer_uncertainty-aware_2021,
title = {Uncertainty-Aware Temporal Self-Learning (UATS): Semi-Supervised Learning for Segmentation of Prostate Zones and Beyond},
author = {A Meyer and S Ghosh and D Schindele and M Schostak and S Stober and C Hansen and M Rak},
url = {http://arxiv.org/abs/2104.03840},
doi = {10.48550/arXiv.2104.03840},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
publisher = {arXiv},
abstract = {Various convolutional neural network (CNN) based concepts have been introduced for the prostate's automatic segmentation and its coarse subdivision into transition zone (TZ) and peripheral zone (PZ). However, when targeting a fine-grained segmentation of TZ, PZ, distal prostatic urethra (DPU) and the anterior fibromuscular stroma (AFS), the task becomes more challenging and has not yet been solved at the level of human performance. One reason might be the insufficient amount of labeled data for supervised training. Therefore, we propose to apply a semi-supervised learning (SSL) technique named uncertainty-aware temporal self-learning (UATS) to overcome the expensive and time-consuming manual ground truth labeling. We combine the SSL techniques temporal ensembling and uncertainty-guided self-learning to benefit from unlabeled images, which are often readily available. Our method significantly outperforms the supervised baseline and obtained a Dice coefficient (DC) of up to 78.9%, 87.3%, 75.3%, 50.6% for TZ, PZ, DPU and AFS, respectively. The obtained results are in the range of human inter-rater performance for all structures. Moreover, we investigate the method's robustness against noise and demonstrate the generalization capability for varying ratios of labeled data and on other challenging tasks, namely the hippocampus and skin lesion segmentation. UATS achieved superior segmentation quality compared to the supervised baseline, particularly for minimal amounts of labeled data.},
note = {arXiv:2104.03840 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}

Meyer, A; Chlebus, G; Rak, M; Schindele, D; Schostak, M; Ginneken, B; Schenk, A; Meine, H; Hahn, H; Schreiber, A; Hansen, C
Anisotropic 3D Multi-Stream CNN for Accurate Prostate Segmentation from Multi-Planar MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 200, pp. 105821, 2021, ISSN: 01692607, (arXiv:2009.11120 [eess]).
@article{meyer_anisotropic_2021,
title = {Anisotropic 3D Multi-Stream CNN for Accurate Prostate Segmentation from Multi-Planar MRI},
author = {A Meyer and G Chlebus and M Rak and D Schindele and M Schostak and B Ginneken and A Schenk and H Meine and H Hahn and A Schreiber and C Hansen},
url = {http://arxiv.org/abs/2009.11120},
doi = {10.1016/j.cmpb.2020.105821},
issn = {01692607},
year = {2021},
date = {2021-03-01},
urldate = {2021-03-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {200},
pages = {105821},
abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions. Despite the availability of multi-planar MR scans due to standardized protocols, the majority of segmentation approaches presented in the literature consider the axial scans only. Methods: We propose an anisotropic 3D multi-stream CNN architecture, which processes additional scan directions to produce a higher-resolution isotropic prostate segmentation. We investigate two variants of our architecture, which work on two (dual-plane) and three (triple-plane) image orientations, respectively. We compare them with the standard baseline (single-plane) used in literature, i.e., plain axial segmentation. To realize a fair comparison, we employ a hyperparameter optimization strategy to select optimal configurations for the individual approaches. Results: Training and evaluation on two datasets spanning multiple sites obtain a statistically significant improvement over the plain axial segmentation (p < 0.05 on the Dice similarity coefficient). The improvement can be observed especially at the base (0.898 single-plane vs. 0.906 triple-plane) and apex (0.888 single-plane vs. 0.901 dual-plane). Conclusion: This study indicates that models employing two or three scan directions are superior to plain axial segmentation. The knowledge of precise boundaries of the prostate is crucial for the conservation of risk structures. Thus, the proposed models have the potential to improve the outcome of prostate cancer diagnosis and therapies.},
note = {arXiv:2009.11120 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Ernst, P; Rak, M; Hansen, C; Rose, G; Nürnberger, A
Trajectory Upsampling for Sparse Conebeam Projections using Convolutional Neural Networks Journal Article
In: 2021.
@article{ernst_trajectory_2021,
title = {Trajectory Upsampling for Sparse Conebeam Projections using Convolutional Neural Networks},
author = {P Ernst and M Rak and C Hansen and G Rose and A Nürnberger},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
abstract = {In this paper, we present an approach based on a combination of convolutional neural networks and analytical algorithms to interpolate between neighboring conebeam projections for upsampling along circular trajectories. More precisely, networks are trained to interpolate the angularly centered projection between the input projections of different angular distances. Experiments show that an analytical interpolation as additional input is more beneficial than adding more neighboring projections. Using our best model, we achieve an x8 upsampling by repeating the interpolation three times. Though not depending on a specific reconstruction algorithm, we show that FDK reconstructions substantially benefit from this upsampling for removing streak artifacts. Using this FDK reconstruction as initialization for ART is also superior to other initializations but comes with a higher computation time and therefore cannot be considered as an option in an interventional setting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
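
The x8 upsampling described above results from inserting the angularly centered projection between neighbours three times in a row. The loop below captures that scheme; the interpolator is left as a callable so that either a trained CNN or a simple analytical stand-in (e.g., averaging the neighbours) can be plugged in.

import numpy as np

def upsample_trajectory(projections, interpolate, rounds=3):
    """Insert the angularly centered projection between neighbours, `rounds` times.

    projections: list of conebeam projections ordered along the circular trajectory
    interpolate: callable (left, right) -> centre projection; in the paper this role
                 is played by a trained CNN, here any stand-in can be plugged in
    Three rounds halve the angular spacing three times, i.e. x8 upsampling.
    """
    for _ in range(rounds):
        upsampled = []
        for left, right in zip(projections[:-1], projections[1:]):
            upsampled.extend([left, interpolate(left, right)])
        upsampled.append(projections[-1])
        projections = upsampled
    return projections

# Stand-in interpolator for testing the loop:
# dense = upsample_trajectory(projs, lambda a, b: 0.5 * (np.asarray(a) + np.asarray(b)))
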
2020

Gulamhussene, G; Joeres, F; Rak, M; Pech, M; Hansen, C
4D MRI: Robust sorting of free breathing MRI slices for use in interventional settings Journal Article
In: PLOS ONE, vol. 15, no. 6, pp. e0235175, 2020, ISSN: 1932-6203, (Publisher: Public Library of Science).
@article{gulamhussene_4d_2020,
title = {4D MRI: Robust sorting of free breathing MRI slices for use in interventional settings},
author = {G Gulamhussene and F Joeres and M Rak and M Pech and C Hansen},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0235175},
doi = {10.1371/journal.pone.0235175},
issn = {1932-6203},
year = {2020},
date = {2020-06-01},
urldate = {2020-06-01},
journal = {PLOS ONE},
volume = {15},
number = {6},
pages = {e0235175},
abstract = {Purpose: We aim to develop a robust 4D MRI method for large FOVs enabling the extraction of irregular respiratory motion that is readily usable with all MRI machines and thus applicable to support a wide range of interventional settings. Method: We propose a 4D MRI reconstruction method to capture an arbitrary number of breathing states. It uses template updates in navigator slices and search regions for fast and robust vessel cross-section tracking. It captures FOVs of 255 mm x 320 mm x 228 mm at a spatial resolution of 1.82 mm x 1.82 mm x 4 mm and temporal resolution of 200 ms. A total of 37 4D MRIs of 13 healthy subjects were reconstructed to validate the method. A quantitative evaluation of the reconstruction rate and speed of both the new and baseline method was performed. Additionally, a study with ten radiologists was conducted to assess the subjective reconstruction quality of both methods. Results: Our results indicate improved mean reconstruction rates compared to the baseline method (79.4% vs. 45.5%) and improved mean reconstruction times (24 s vs. 73 s) per subject. Interventional radiologists perceive the reconstruction quality of our method as higher compared to the baseline (262.5 points vs. 217.5 points).},
note = {Publisher: Public Library of Science},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
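
The reconstruction method above relies on fast vessel cross-section tracking with template updates in navigator slices. A minimal version of such tracking, using normalized cross-correlation within a search region and an exponential template update, is sketched below; window size, search radius and update rate are illustrative choices, not the paper's parameters.

import numpy as np

def track_vessel(navigator, template, prev_pos, search_radius=10, update_rate=0.2):
    """Track a vessel cross-section in a navigator slice and update the template.

    Normalized cross-correlation is evaluated in a small search region around the
    previous position; the best match is blended back into the template.
    """
    th, tw = template.shape
    best_score, best_pos = -np.inf, prev_pos
    for dy in range(-search_radius, search_radius + 1):
        for dx in range(-search_radius, search_radius + 1):
            y, x = prev_pos[0] + dy, prev_pos[1] + dx
            if y < 0 or x < 0:
                continue
            patch = navigator[y:y + th, x:x + tw]
            if patch.shape != template.shape:
                continue
            a, b = patch - patch.mean(), template - template.mean()
            score = (a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)
            if score > best_score:
                best_score, best_pos = score, (y, x)
    y, x = best_pos
    new_template = (1 - update_rate) * template + update_rate * navigator[y:y + th, x:x + tw]
    return best_pos, new_template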

Wei, W; Rak, M; Alpers, J; Hansen, C
Towards Fully Automatic 2D Us to 3D CT/MR Registration: A Novel Segmentation-Based Strategy Proceedings Article
In: 2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI), pp. 433–437, IEEE, Iowa City, IA, USA, 2020, ISBN: 978-1-5386-9330-8.
@inproceedings{wei_towards_2020,
title = {Towards Fully Automatic 2D Us to 3D CT/MR Registration: A Novel Segmentation-Based Strategy},
author = {W Wei and M Rak and J Alpers and C Hansen},
url = {https://ieeexplore.ieee.org/document/9098379/},
doi = {10.1109/ISBI45749.2020.9098379},
isbn = {978-1-5386-9330-8},
year = {2020},
date = {2020-04-01},
urldate = {2020-04-01},
booktitle = {2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI)},
pages = {433–437},
publisher = {IEEE},
address = {Iowa City, IA, USA},
abstract = {2D-US to 3D-CT/MR registration is a crucial module during minimally invasive ultrasound-guided liver tumor ablations. Many modern registration methods still require manual or semi-automatic slice pose initialization due to insufficient robustness of automatic methods. The state-of-the-art regression networks do not work well for liver 2D US to 3D CT/MR registration because of the tremendous inter-patient variability of the liver anatomy. To address this unsolved problem, we propose a deep learning network pipeline which instead of a regression starts with a classification network to recognize the coarse ultrasound transducer pose followed by a segmentation network to detect the target plane of the US image in the CT/MR volume. The rigid registration result is derived using plane regression. In contrast to the state-of-the-art regression networks, we do not estimate registration parameters from multi-modal images directly, but rather focus on segmenting the target slice plane in the volume. The experiments reveal that this novel registration strategy can identify the initial slice phase in a 3D volume more reliably than the standard regression-based techniques. The proposed method was evaluated with 1035 US images from 52 patients. We achieved angle and distance errors of 12.7 ± 6.2 degrees and 4.9 ± 3.1 mm, clearly outperforming the state-of-the-art regression strategy which results in 37.0 ± 15.6 degrees angle error and 19.0 ± 11.6 mm distance error.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Schindele, D; Meyer, A; Reibnitz, D Von; Kiesswetter, V; Schostak, M; Rak, M; Hansen, C
High Resolution Prostate Segmentations for the ProstateX-Challenge Miscellaneous
2020.
@misc{schindele_high_2020,
title = {High Resolution Prostate Segmentations for the ProstateX-Challenge},
author = {D Schindele and A Meyer and D Von Reibnitz and V Kiesswetter and M Schostak and M Rak and C Hansen},
url = {https://www.cancerimagingarchive.net/analysis-result/prostatex-seg-hires/},
doi = {10.7937/TCIA.2019.DEG7ZG1U},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
publisher = {The Cancer Imaging Archive},
abstract = {We created 66 high resolution segmentations for randomly chosen T2-weighted volumes of the ProstateX challenge. The high resolution segmentations were obtained by considering the three scan directions: for each scan direction (axial, sagittal, coronal), the gland was manually delineated by a medical student, followed by a review and corrections of an expert urologist. These three anisotropic segmentations were fused to one isotropic segmentation by means of shape-based interpolation in the following manner: (1) The signed distance transformation of the three segmentations is computed. (2) The anisotropic distance volumes are transformed into an isotropic high-resolution representation with linear interpolation. (3) By averaging the distances, smoothing and thresholding them at zero, we obtained the fused segmentation. The resulting segmentations were manually verified and corrected further by the expert urologist if necessary. Serving as ground truth for training CNNs, these segmentations have the potential to improve the segmentation accuracy of automated algorithms. By considering not only the axial scans but also sagittal and coronal scan directions, we aimed to have higher fidelity of the segmentations especially at the apex and base regions of the prostate.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
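
The abstract above spells out the shape-based interpolation used to fuse the three anisotropic segmentations. A compact NumPy/SciPy sketch of those three steps follows; the resampling order, the Gaussian smoothing width and the assumption that all resampled volumes share one target grid are illustrative choices, not the exact parameters used for the released segmentations.

import numpy as np
from scipy.ndimage import distance_transform_edt, gaussian_filter, zoom

def signed_distance(mask):
    """Signed distance transform: negative inside the gland, positive outside."""
    mask = np.asarray(mask, dtype=bool)
    return distance_transform_edt(~mask) - distance_transform_edt(mask)

def fuse_anisotropic_segmentations(masks, zoom_factors, sigma=1.0):
    """Shape-based interpolation of several anisotropic segmentations.

    masks:        binary segmentations (e.g. axial, sagittal, coronal delineations)
    zoom_factors: per-mask factors that resample each distance volume to a common
                  isotropic grid (assumed to yield identical target shapes)
    """
    distances = [zoom(signed_distance(m), f, order=1) for m, f in zip(masks, zoom_factors)]
    fused = gaussian_filter(np.mean(distances, axis=0), sigma=sigma)
    return fused < 0   # inside where the averaged signed distance is negative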

Meyer, A; Schindele, D; Reibnitz, D; Rak, M; Schostak, M; Hansen, C
PROSTATEx Zone Segmentations Miscellaneous
2020.
@misc{meyer_prostatex_2020,
title = {PROSTATEx Zone Segmentations},
author = {A Meyer and D Schindele and D Reibnitz and M Rak and M Schostak and C Hansen},
url = {https://www.cancerimagingarchive.net/analysis-result/prostatex-seg-zones/},
doi = {10.7937/TCIA.NBB4-4655},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
publisher = {The Cancer Imaging Archive},
abstract = {This collection contains prostate’s zonal segmentation for 98 cases randomly selected from the SPIE-AAPM-NCI PROSTATEx Challenge. The four-class segmentation encompasses the peripheral zone, transition zone, fibromuscular stroma and the distal prostatic urethra. As underlying images, we used transversal T2w scans. Segmentations were created by a medical student with experience in prostate segmentation and an expert urologist who instructed the student and double-checked the segmentations in the end. The DICOM representation of these segmentations were generated with dcmqi.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
2019

Rak, M; Steffen, J; Meyer, A; Hansen, C; Tönnies, K
Combining convolutional neural networks and star convex cuts for fast whole spine vertebra segmentation in MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 177, pp. 47–56, 2019, ISSN: 01692607.
@article{rak_combining_2019,
title = {Combining convolutional neural networks and star convex cuts for fast whole spine vertebra segmentation in MRI},
author = {M Rak and J Steffen and A Meyer and C Hansen and K Tönnies},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0169260718307417},
doi = {10.1016/j.cmpb.2019.05.003},
issn = {01692607},
year = {2019},
date = {2019-08-01},
urldate = {2019-08-01},
journal = {Computer Methods and Programs in Biomedicine},
volume = {177},
pages = {47–56},
abstract = {Methods: We address these limitations by a novel graph cut formulation based on vertebra patches extracted along the spine. For each patch, our formulation incorporates appearance and shape information derived from a task-specific convolutional neural network as well as star-convexity constraints that ensure a topologically correct segmentation of each vertebra. When segmenting vertebrae individually, ambiguities will occur due to overlapping segmentations of adjacent vertebrae. We tackle this problem by novel nonoverlap constraints between neighboring patches based on so-called encoding swaps. The latter allow us to obtain a globally optimal multi-label segmentation of all vertebrae in polynomial time.
Results: We validated our approach on two data sets. The first contains T1- and T2-weighted whole spine images of 64 subjects with varying health conditions. The second comprises 23 T2-weighted thoracolumbar images of young healthy adults and is publicly available. Our method yielded Dice coefficients of 93.8 ± 2.6 % and 96.0 ± 1.0 % for both data sets with a run time of 1.35 ± 0.08 s and 0.90 ± 0.03 s per vertebra on consumer hardware. A complete whole spine segmentation took 32.4 ± 1.92 s on average.
Conclusions: Our results are superior to those of previous works at a fraction of their run time, which illustrates the efficiency and effectiveness of our whole spine segmentation approach.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Wei, W; Xu, H; Alpers, J; Tianbao, Z; Wang, L; Rak, M; Hansen, C
Fast Registration for Liver Motion Compensation in Ultrasound-Guided Navigation Proceedings Article
In: 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), pp. 1132–1136, IEEE, Venice, Italy, 2019, ISBN: 978-1-5386-3641-1.
@inproceedings{wei_fast_2019,
title = {Fast Registration for Liver Motion Compensation in Ultrasound-Guided Navigation},
author = {W Wei and H Xu and J Alpers and Z Tianbao and L Wang and M Rak and C Hansen},
url = {https://ieeexplore.ieee.org/document/8759464/},
doi = {10.1109/ISBI.2019.8759464},
isbn = {978-1-5386-3641-1},
year = {2019},
date = {2019-04-01},
urldate = {2019-04-01},
booktitle = {2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)},
pages = {1132–1136},
publisher = {IEEE},
address = {Venice, Italy},
abstract = {In recent years, image-guided thermal ablations have become a considerable treatment method for cancer patients, including support through navigational systems. One of the most critical challenges in these systems is the registration between the intraoperative images and the preoperative volume. The motion secondary to inspiration makes registration even more difficult. In this work, we propose a coarse-fine fast patient registration technique to solve the problem of motion compensation. In contrast to other state-of-the-art methods, we focus on improving the convergence range of registration. To this end, we make use of a Deep Learning 2D UNet framework to extract the vessels and liver borders from intraoperative ultrasound images and employ the segmentation results as regions of interest in the registration. After an initial 3D-3D registration during breath hold, the following motion compensation is achieved using a 2D-3D registration. Our approach yields a convergence rate of over 70% with an accuracy of 1.97 ± 1.07 mm regarding the target registration error. The 2D-3D registration is GPU-accelerated with a time cost of less than 200 ms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Meyer, A; Rak, M; Schindele, D; Blaschke, S; Schostak, M; Fedorov, A; Hansen, C
Towards Patient-Individual PI-Rads v2 Sector Map: Cnn for Automatic Segmentation of Prostatic Zones From T2-Weighted MRI Proceedings Article
In: 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), pp. 696–700, IEEE, Venice, Italy, 2019, ISBN: 978-1-5386-3641-1.
@inproceedings{meyer_towards_2019,
title = {Towards Patient-Individual PI-Rads v2 Sector Map: Cnn for Automatic Segmentation of Prostatic Zones From T2-Weighted MRI},
author = {A Meyer and M Rak and D Schindele and S Blaschke and M Schostak and A Fedorov and C Hansen},
url = {https://ieeexplore.ieee.org/document/8759572/},
doi = {10.1109/ISBI.2019.8759572},
isbn = {978-1-5386-3641-1},
year = {2019},
date = {2019-04-01},
urldate = {2019-04-01},
booktitle = {2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)},
pages = {696–700},
publisher = {IEEE},
address = {Venice, Italy},
abstract = {Automatic segmentation of the prostate, its inner and surrounding structures is highly desired for various applications. Several works have been presented for segmentation of anatomical zones of the prostate that are limited to the transition and peripheral zone. Following the spatial division according to the PI-RADS v2 sector map, we present a multi-class segmentation method that additionally targets the anterior fibromuscular stroma and distal prostatic urethra to improve computer-aided detection methods and enable a more precise therapy planning. We propose a multi-class segmentation with an anisotropic convolutional neural network that generates a topologically correct division of the prostate into these four structures. We evaluated our method on a dataset of T2-weighted axial MRI scans (n=98 subjects) and obtained results in the range of inter-rater variability for the majority of the zones.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Ernst, P; Hille, G; Hansen, C; Tönnies, K; Rak, M
A CNN-Based Framework for Statistical Assessment of Spinal Shape and Curvature in Whole-Body MRI Images of Large Populations Book Chapter
In: Shen, D; Liu, T; Peters, T; Staib, L; Essert, C; Zhou, S; Yap, P; Khan, A (Ed.): Medical Image Computing and Computer Assisted Intervention – MICCAI 2019, vol. 11767, pp. 3–11, Springer International Publishing, Cham, 2019, ISBN: 978-3-030-32250-2 978-3-030-32251-9, (Series Title: Lecture Notes in Computer Science).
@incollection{shen_cnn-based_2019,
title = {A CNN-Based Framework for Statistical Assessment of Spinal Shape and Curvature in Whole-Body MRI Images of Large Populations},
author = {P Ernst and G Hille and C Hansen and K Tönnies and M Rak},
editor = {D Shen and T Liu and T Peters and L Staib and C Essert and S Zhou and P Yap and A Khan},
url = {https://link.springer.com/10.1007/978-3-030-32251-9_1},
doi = {10.1007/978-3-030-32251-9_1},
isbn = {978-3-030-32250-2 978-3-030-32251-9},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Medical Image Computing and Computer Assisted Intervention – MICCAI 2019},
volume = {11767},
pages = {3–11},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {The extraction of spines from medical records in a fast yet accurate way is a challenging task, especially for large data sets. Addressing this issue, we present a framework based on convolutional neural networks for the reconstruction of the spinal shape and curvature, making statistical assessments feasible on epidemiological scale. Our method uses a two-step strategy. First, anchor vertebrae and the spinal centerline in between them get extracted. Second, the centerlines are transformed into a common coordinate system to enable comparisons and statistical assessments across subjects. Our networks were trained on 103 subjects, where we achieved accuracies of 3.3 mm on average, taking at most 1 s per record, which eases the handling of even very large cohorts. Without any further training, we validated our model on study data of about 3400 subjects with only 10 cases of failure, which demonstrates the robustness of our method with respect to the natural variability in spinal shape and curvature. A thorough statistical analysis of the results underpins the importance of our work. Specifically, we show that the spinal curvature is significantly influenced by the body mass index of a subject. Moreover, we show that the same findings arise when Cobb angles are considered instead of direct curvature measures. To this end, we propose a generalization of classical Cobb angles that can be evaluated algorithmically and can also serve as a useful (visual) tool for physicians in everyday clinical practice.},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
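
The generalized Cobb angle mentioned above can be evaluated algorithmically from a reconstructed spinal centerline. The sketch below takes the largest mutual inclination of centerline tangents as a simplified reading of that idea; it is an illustration, not the authors' exact definition.

import numpy as np

def generalized_cobb_angle(centerline):
    """Largest angle between tangent directions along a spinal centerline.

    centerline: (N, 3) array of points ordered from cervical to lumbar spine.
    """
    tangents = np.diff(np.asarray(centerline, dtype=float), axis=0)
    tangents /= np.linalg.norm(tangents, axis=1, keepdims=True)
    cosines = np.clip(tangents @ tangents.T, -1.0, 1.0)
    return float(np.degrees(np.arccos(cosines).max()))
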
2018

Meyer, A; Mehrtash, A; Rak, M; Schindele, D; Schostak, M; Tempany, C; Kapur, T; Abolmaesumi, P; Fedorov, A; Hansen, C
Automatic high resolution segmentation of the prostate from multi-planar MRI Proceedings Article
In: 2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018), pp. 177–181, IEEE, Washington, DC, 2018, ISBN: 978-1-5386-3636-7.
@inproceedings{meyer_automatic_2018,
title = {Automatic high resolution segmentation of the prostate from multi-planar MRI},
author = {A Meyer and A Mehrtash and M Rak and D Schindele and M Schostak and C Tempany and T Kapur and P Abolmaesumi and A Fedorov and C Hansen},
url = {https://ieeexplore.ieee.org/document/8363549/},
doi = {10.1109/ISBI.2018.8363549},
isbn = {978-1-5386-3636-7},
year = {2018},
date = {2018-04-01},
urldate = {2018-04-01},
booktitle = {2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)},
pages = {177–181},
publisher = {IEEE},
address = {Washington, DC},
abstract = {Individualized and accurate segmentations of the prostate are essential for diagnosis as well as therapy planning in prostate cancer (PCa). Most of the previously proposed prostate segmentation approaches rely purely on axial MRI scans, which suffer from low out-of-plane resolution. We propose a method that makes use of sagittal and coronal MRI scans to improve the accuracy of segmentation. These scans are typically acquired as standard of care for PCa staging, but are generally ignored by the segmentation algorithms. Our method is based on a multi-stream 3D convolutional neural network for the automatic extraction of isotropic high resolution segmentations from MR images. We evaluated segmentation performance on an isotropic high resolution ground truth (n = 40 subjects). The results show that the use of multi-planar volumes for prostate segmentation leads to improved segmentation results not only for the whole prostate (92.1% Dice similarity coefficient), but also in apex and base regions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}