Dr. Gino Gulamhussene
Publications
2023

Gulamhussene, G; Rak, M; Bashkanov, O; Joeres, F; Omari, J; Pech, M; Hansen, C
Transfer-learning is a key ingredient to fast deep learning-based 4D liver MRI reconstruction Journal Article
In: Scientific Reports, vol. 13, no. 1, pp. 11227, 2023, ISSN: 2045-2322, (Publisher: Nature Publishing Group).
@article{gulamhussene_transfer-learning_2023,
title = {Transfer-learning is a key ingredient to fast deep learning-based 4D liver MRI reconstruction},
author = {G Gulamhussene and M Rak and O Bashkanov and F Joeres and J Omari and M Pech and C Hansen},
url = {https://www.nature.com/articles/s41598-023-38073-1},
doi = {10.1038/s41598-023-38073-1},
issn = {2045-2322},
year = {2023},
date = {2023-07-01},
urldate = {2023-07-01},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {11227},
abstract = {Time-resolved volumetric magnetic resonance imaging (4D MRI) could be used to address organ motion in image-guided interventions like tumor ablation. Current 4D reconstruction techniques are unsuitable for most interventional settings because they are limited to specific breathing phases, lack temporal/spatial resolution, and have long prior acquisitions or reconstruction times. Deep learning-based (DL) 4D MRI approaches promise to overcome these shortcomings but are sensitive to domain shift. This work shows that transfer learning (TL) combined with an ensembling strategy can help alleviate this key challenge. We evaluate four approaches: pre-trained models from the source domain, models directly trained from scratch on target domain data, models fine-tuned from a pre-trained model, and an ensemble of fine-tuned models. For this, the database was split into 16 source-domain and 4 target-domain subjects. Comparing the ensemble of fine-tuned models (N = 10) with directly trained models, we report significant improvements (P < 0.001) of the root mean squared error (RMSE) of up to 12% and the mean displacement (MDISP) of up to 17.5%. The smaller the amount of target-domain data, the larger the effect. This shows that TL combined with ensembling (TL + Ens) significantly reduces the required prior acquisition time and improves reconstruction quality, rendering it a key component in making 4D MRI clinically feasible for the first time in the context of 4D organ motion models of the liver and beyond.},
note = {Publisher: Nature Publishing Group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
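
As a hedged illustration of the recipe this paper describes (pre-train on source-domain subjects, fine-tune on sparse target-domain data, then ensemble the fine-tuned models), here is a minimal PyTorch sketch. The model, data loader, loss, and hyperparameters are placeholders of mine, not the authors' code.

import torch
import torch.nn as nn

def fine_tune(model: nn.Module, target_loader, epochs: int = 10, lr: float = 1e-4) -> nn.Module:
    # Adapt a source-domain pre-trained model to sparse target-domain pairs
    # (navigator slice in, data slice out); MSE stands in for the training loss.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()
    model.train()
    for _ in range(epochs):
        for navigator, target in target_loader:
            optimizer.zero_grad()
            loss_fn(model(navigator), target).backward()
            optimizer.step()
    return model

def ensemble_predict(models, navigator: torch.Tensor) -> torch.Tensor:
    # Average the outputs of N independently fine-tuned models (the paper uses N = 10).
    with torch.no_grad():
        return torch.stack([m(navigator) for m in models]).mean(dim=0)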

Gulamhussene, G; Spiegel, J; Das, A; Rak, M; Hansen, C
Deep Learning-based Marker-less Pose Estimation of Interventional Tools using Surrogate Keypoints Proceedings Article
In: Deserno, T; Handels, H; Maier, A; Maier-Hein, K; Palm, C; Tolxdorff, T (Ed.): Bildverarbeitung für die Medizin 2023, pp. 292–298, Springer Fachmedien, Wiesbaden, 2023, ISBN: 978-3-658-41657-7.
@inproceedings{gulamhussene_deep_2023,
title = {Deep Learning-based Marker-less Pose Estimation of Interventional Tools using Surrogate Keypoints},
author = {G Gulamhussene and J Spiegel and A Das and M Rak and C Hansen},
editor = {T Deserno and H Handels and A Maier and K Maier-Hein and C Palm and T Tolxdorff},
doi = {10.1007/978-3-658-41657-7_63},
isbn = {978-3-658-41657-7},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Bildverarbeitung für die Medizin 2023},
pages = {292–298},
publisher = {Springer Fachmedien},
address = {Wiesbaden},
abstract = {Estimating the position of an intervention needle is an important ability in computer-assisted interventions. Currently, such pose estimations rely either on radiation-intensive CT imaging or need additional optical markers which add overhead to the clinical workflow. We propose a novel deep-learning-based technique for pose estimation of interventional tools that relies on detecting visible features on the tool itself, without additional markers. We also propose a novel and fast pipeline for creating vast amounts of robustly labeled and marker-less ground truth data for training such neural networks. Initial evaluations suggest that, with needle base and needle tip localization errors of about 1 and 4 cm respectively, our approach can yield a search corridor that can be used to find the needle in a low-dose CT image, reducing radiation exposure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
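
To make the surrogate-keypoint idea concrete, the sketch below turns per-keypoint heatmaps into image coordinates and derives a needle axis from base and tip detections. Heatmap regression is an assumption of this sketch; the paper's actual network output and post-processing may differ.

import numpy as np

def keypoints_from_heatmaps(heatmaps: np.ndarray) -> np.ndarray:
    # heatmaps: (K, H, W), one channel per surrogate keypoint on the tool.
    # Returns a (K, 2) array of (x, y) coordinates at each channel's maximum.
    K, H, W = heatmaps.shape
    flat_idx = heatmaps.reshape(K, -1).argmax(axis=1)
    ys, xs = np.unravel_index(flat_idx, (H, W))
    return np.stack([xs, ys], axis=1).astype(float)

def needle_axis(base_xy: np.ndarray, tip_xy: np.ndarray):
    # Unit direction and length of the needle in the image plane; such an axis
    # could define the search corridor mentioned in the abstract.
    d = tip_xy - base_xy
    length = float(np.linalg.norm(d))
    return d / length, length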

Gulamhussene, G; Das, A; Spiegel, J; Punzet, D; Rak, M; Hansen, C
Needle Tip Tracking During CT-guided Interventions using Fuzzy Segmentation Proceedings Article
In: Deserno, T; Handels, H; Maier, A; Maier-Hein, K; Palm, C; Tolxdorff, T (Ed.): Bildverarbeitung für die Medizin 2023, pp. 285–291, Springer Fachmedien, Wiesbaden, 2023, ISBN: 978-3-658-41657-7.
@inproceedings{gulamhussene_needle_2023,
title = {Needle Tip Tracking During CT-guided Interventions using Fuzzy Segmentation},
author = {G Gulamhussene and A Das and J Spiegel and D Punzet and M Rak and C Hansen},
editor = {T Deserno and H Handels and A Maier and K Maier-Hein and C Palm and T Tolxdorff},
doi = {10.1007/978-3-658-41657-7_62},
isbn = {978-3-658-41657-7},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Bildverarbeitung für die Medizin 2023},
pages = {285–291},
publisher = {Springer Fachmedien},
address = {Wiesbaden},
abstract = {CT-guided interventions are standard practice for radiologists to treat lesions in various parts of the human body. In this context, accurate tracking of instruments is of paramount importance for the safety of the procedure and helps radiologists avoid unintended damage to adjacent organs. In this work, a novel method for the estimation of 3D needle tip coordinates in a CT volume using only two 2D projections in an interventional setting is proposed. The method applies a deep learning model for the fuzzy segmentation of the region containing the tip on 2D projections and automatically extracts the position of the tip. A simple U-Net achieves a Dice score of 0.9906 for the fuzzy segmentation and an average Euclidean distance of 2.96 mm for the needle tip regression task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
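
One possible reading of the tip-extraction step, shown as a sketch: take the probability-weighted centroid of the fuzzy segmentation in each 2D projection, then fuse two orthogonal projections into a 3D coordinate. Both the centroid rule and the orthogonal-view geometry are assumptions here; the paper only states that the tip position is extracted automatically.

import numpy as np

def tip_from_fuzzy_mask(prob: np.ndarray) -> np.ndarray:
    # prob: (H, W) fuzzy segmentation of the tip region; the tip estimate is
    # the probability-weighted centroid, returned as (x, y).
    ys, xs = np.nonzero(prob > 0)
    w = prob[ys, xs]
    return np.array([np.average(xs, weights=w), np.average(ys, weights=w)])

def tip_3d_from_two_views(tip_xy: np.ndarray, tip_xz: np.ndarray) -> np.ndarray:
    # Combine tip estimates from two orthogonal projections, one in the x/y
    # plane and one in the x/z plane, averaging the shared x coordinate.
    x = 0.5 * (tip_xy[0] + tip_xz[0])
    return np.array([x, tip_xy[1], tip_xz[1]])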

Gulamhussene, G; Bashkanov, O; Omari, J; Pech, M; Hansen, C; Rak, M
Using Training Samples as Transitive Information Bridges in Predicted 4D MRI Proceedings Article
In: Xue, Z; Antani, S; Zamzmi, G; Yang, F; Rajaraman, S; Huang, S; Linguraru, M; Liang, Z (Ed.): Medical Image Learning with Limited and Noisy Data, pp. 237–245, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-44917-8.
@inproceedings{gulamhussene_using_2023,
title = {Using Training Samples as Transitive Information Bridges in Predicted 4D MRI},
author = {G Gulamhussene and O Bashkanov and J Omari and M Pech and C Hansen and M Rak},
editor = {Z Xue and S Antani and G Zamzmi and F Yang and S Rajaraman and S Huang and M Linguraru and Z Liang},
doi = {10.1007/978-3-031-44917-8_23},
isbn = {978-3-031-44917-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Medical Image Learning with Limited and Noisy Data},
pages = {237–245},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {The lack of real-time techniques for monitoring respiratory motion impairs the development of guidance systems for image-guided interventions. Recent works show that U-Net-based real-time 4D MRI prediction methods are promising, but prone to poor image quality when small training datasets and inputs with multiple MR contrasts are used. To overcome this problem, we propose a more efficient use of the sparse training data and re-utilize 2D training samples as a secondary input for the construction of transitive information bridges between the primary navigator-slice input and the predicted data slice. We thus remove the need for a separate 3D breath-hold MRI with a different MR contrast as the secondary input. Results show that our novel construction leads to improved prediction quality with very sparse training data, with a significant decrease in root mean squared error (RMSE) from 0.3 to 0.27 (p < 2.2e-16).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
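
One way to picture the "transitive information bridge" at the network interface is to stack the re-used 2D training sample with the live navigator slice as a second input channel, as in this sketch. The channel-concatenation fusion and class names are assumptions of mine; the paper may combine the inputs differently.

import torch
import torch.nn as nn

class BridgedPredictor(nn.Module):
    # Wraps a U-Net-style backbone that expects a 2-channel 2D input: the live
    # navigator slice plus a re-used training sample acting as the bridge.
    def __init__(self, backbone: nn.Module):
        super().__init__()
        self.backbone = backbone

    def forward(self, navigator: torch.Tensor, bridge: torch.Tensor) -> torch.Tensor:
        # navigator, bridge: (B, 1, H, W); returns the predicted data slice.
        return self.backbone(torch.cat([navigator, bridge], dim=1))
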
2022

Gulamhussene, G; Meyer, A; Rak, M; Bashkanov, O; Omari, J; Pech, M; Hansen, C
Predicting 4D liver MRI for MR-guided interventions Journal Article
In: Computerized Medical Imaging and Graphics, vol. 101, pp. 102122, 2022, ISSN: 0895-6111.
@article{gulamhussene_predicting_2022,
title = {Predicting 4D liver MRI for MR-guided interventions},
author = {G Gulamhussene and A Meyer and M Rak and O Bashkanov and J Omari and M Pech and C Hansen},
url = {https://www.sciencedirect.com/science/article/pii/S0895611122000921},
doi = {10.1016/j.compmedimag.2022.102122},
issn = {0895-6111},
year = {2022},
date = {2022-10-01},
urldate = {2022-10-01},
journal = {Computerized Medical Imaging and Graphics},
volume = {101},
pages = {102122},
abstract = {Organ motion poses an unresolved challenge in image-guided interventions like radiation therapy, biopsies or tumor ablation. In the pursuit of solving this problem, the research field of time-resolved volumetric magnetic resonance imaging (4D MRI) has evolved. However, current techniques are unsuitable for most interventional settings because they lack sufficient temporal and/or spatial resolution or have long acquisition times. In this work, we propose a novel approach for real-time, high-resolution 4D MRI with large fields of view for MR-guided interventions. To this end, we propose a network-agnostic, end-to-end trainable, deep learning formulation that enables the prediction of a 4D liver MRI with respiratory states from a live 2D navigator MRI. Our method can be used in two ways: First, it can reconstruct high-quality, fast (near real-time) 4D MRI at high resolution (209×128×128 matrix size, isotropic 1.8 mm voxel size, 0.6 s/volume), given a dynamic interventional 2D navigator slice for guidance during an intervention. Second, it can be used for retrospective 4D reconstruction with a temporal resolution below 0.2 s/volume for motion analysis and use in radiation therapy. We report a mean target registration error (TRE) of 1.19 ± 0.74 mm, which is below voxel size. We compare our results with a state-of-the-art retrospective 4D MRI reconstruction; visual evaluation shows comparable quality. We compare different network architectures within our formulation. We show that small training sets with short acquisition times down to 2 min can already achieve promising results, and that 24 min are sufficient for high-quality results. Because our method can be readily combined with earlier time-reducing methods, acquisition time can be further decreased while also limiting quality loss. We show that an end-to-end deep learning formulation is highly promising for 4D MRI reconstruction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
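
Conceptually, reconstruction here queries the trained network once per data-slice position for each incoming 2D navigator frame and stacks the outputs into one respiratory-state volume, as in the sketch below. Conditioning the network on a slice-position index is an assumption of this sketch, not a confirmed detail of the paper's architecture.

import torch

@torch.no_grad()
def predict_volume(model, navigator: torch.Tensor, slice_positions) -> torch.Tensor:
    # navigator: the live 2D navigator slice; slice_positions: identifiers for
    # every sagittal data-slice location covering the liver. One network call
    # per position yields the full 3D volume for the current breathing state.
    slices = [model(navigator, pos) for pos in slice_positions]
    return torch.stack(slices, dim=0)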
2020

Gulamhussene, G; Joeres, F; Rak, M; Pech, M; Hansen, C
4D MRI: Robust sorting of free breathing MRI slices for use in interventional settings Journal Article
In: PLOS ONE, vol. 15, no. 6, pp. e0235175, 2020, ISSN: 1932-6203, (Publisher: Public Library of Science).
@article{gulamhussene_4d_2020,
title = {4D MRI: Robust sorting of free breathing MRI slices for use in interventional settings},
author = {G Gulamhussene and F Joeres and M Rak and M Pech and C Hansen},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0235175},
doi = {10.1371/journal.pone.0235175},
issn = {1932-6203},
year = {2020},
date = {2020-06-01},
urldate = {2020-06-01},
journal = {PLOS ONE},
volume = {15},
number = {6},
pages = {e0235175},
abstract = {Purpose: We aim to develop a robust 4D MRI method for large FOVs enabling the extraction of irregular respiratory motion that is readily usable with all MRI machines and thus applicable to support a wide range of interventional settings. Method: We propose a 4D MRI reconstruction method to capture an arbitrary number of breathing states. It uses template updates in navigator slices and search regions for fast and robust vessel cross-section tracking. It captures FOVs of 255 mm × 320 mm × 228 mm at a spatial resolution of 1.82 mm × 1.82 mm × 4 mm and a temporal resolution of 200 ms. A total of 37 4D MRIs of 13 healthy subjects were reconstructed to validate the method. A quantitative evaluation of the reconstruction rate and speed of both the new and the baseline method was performed. Additionally, a study with ten radiologists was conducted to assess the subjective reconstruction quality of both methods. Results: Our results indicate improved mean reconstruction rates compared to the baseline method (79.4% vs. 45.5%) and improved mean reconstruction times (24 s vs. 73 s) per subject. Interventional radiologists perceive the reconstruction quality of our method as higher compared to the baseline (262.5 points vs. 217.5 points).},
note = {Publisher: Public Library of Science},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
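
The tracking core, normalised cross-correlation of a vessel-cross-section template inside a search region with a template update, can be sketched with OpenCV. The blending-based update rule and the region layout are assumptions of this sketch; the paper specifies template updates but not necessarily this exact scheme.

import cv2
import numpy as np

def track_vessel(frame: np.ndarray, template: np.ndarray, region, update: float = 0.1):
    # frame: navigator slice (float32); region: (y0, y1, x0, x1) search window.
    y0, y1, x0, x1 = region
    scores = cv2.matchTemplate(frame[y0:y1, x0:x1], template, cv2.TM_CCOEFF_NORMED)
    _, _, _, (mx, my) = cv2.minMaxLoc(scores)  # best match inside the region
    th, tw = template.shape
    patch = frame[y0 + my:y0 + my + th, x0 + mx:x0 + mx + tw]
    # Blend the matched patch into the template so tracking can follow slow
    # appearance changes across breathing states.
    new_template = (1.0 - update) * template + update * patch
    return (x0 + mx, y0 + my), new_template.astype(template.dtype)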