Oleksii Bashkanov, M.Sc.
Publications
2025

Bashkanov, O; Rak, M; Engelage, L; Hansen, C
Augmenting Prostate MRI Dataset with Synthetic Volumetric Images from Zone-Conditioned Diffusion Generative Model Proceedings Article
In: Mukhopadhyay, A; Oksuz, Ilkay; Engelhardt, Sandy; Merhof, Dorit; Yuan, Yixuan (Ed.): Deep Generative Models, pp. 160–168, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-72744-3.
@inproceedings{bashkanov_augmenting_2025,
  title     = {Augmenting Prostate {MRI} Dataset with Synthetic Volumetric Images from Zone-Conditioned Diffusion Generative Model},
  author    = {Bashkanov, O. and Rak, M. and Engelage, L. and Hansen, C.},
  editor    = {Mukhopadhyay, A. and Oksuz, Ilkay and Engelhardt, Sandy and Merhof, Dorit and Yuan, Yixuan},
  doi       = {10.1007/978-3-031-72744-3_16},
  isbn      = {978-3-031-72744-3},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Deep Generative Models},
  pages     = {160--168},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  abstract  = {The need for artificial intelligence (AI)-driven computer-assisted diagnosis (CAD) tools drives up the demand for large high-quality datasets in medical imaging. However, collecting the necessary amount of data is often impractical due to patient privacy concerns or restricted time for medical annotation. Recent advances in generative models in medical imaging with a focus on diffusion-based techniques could provide realistic-looking synthetic samples as a supplement for real data. In this work, we study whether synthetic volumetric MRIs generated by the diffusion model can be used to train downstream models, e.g., semantic segmentation. We can create an arbitrarily large dataset with ground truth by conditioning the diffusion model with a segmentation mask. Thus, the additional synthetic data can be used to control the dataset diversity. Experiments revealed that downstream tasks profit from additional synthetic data. However, the effect will eventually diminish when sufficient real samples are available. We showcase the strength of the synthetic data and provide practical recommendations for using the generated data in zonal prostate segmentation.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Bashkanov, O; Engelage, L; Behnel, N; Ehrlich, P; Hansen, C; Rak, M
Multimodal Data Fusion with Irregular PSA Kinetics for Automated Prostate Cancer Grading Journal Article
In: 2025.
@article{bashkanov_multimodal_2025,
  title     = {Multimodal Data Fusion with Irregular {PSA} Kinetics for Automated Prostate Cancer Grading},
  author    = {Bashkanov, O. and Engelage, L. and Behnel, N. and Ehrlich, P. and Hansen, C. and Rak, M.},
  year      = {2025},
  date      = {2025-01-01},
  pubstate  = {published},
  tppubtype = {article}
}
2024

Bashkanov, O; Rak, M; Engelage, L; Hansen, C
Automatic Patient-level Diagnosis of Prostate Disease with Fused 3D MRI and Tabular Clinical Data Proceedings Article
In: Medical Imaging with Deep Learning, pp. 1225–1238, PMLR, 2024, (ISSN: 2640-3498).
@inproceedings{bashkanov_automatic_2024,
  title     = {Automatic Patient-level Diagnosis of Prostate Disease with Fused {3D} {MRI} and Tabular Clinical Data},
  author    = {Bashkanov, O. and Rak, M. and Engelage, L. and Hansen, C.},
  url       = {https://proceedings.mlr.press/v227/bashkanov24a.html},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  booktitle = {Medical Imaging with Deep Learning},
  pages     = {1225--1238},
  publisher = {PMLR},
  abstract  = {Computer-aided diagnosis systems for automatic prostate cancer diagnosis can provide radiologists with decision support during image reading. However, in this case, patient-relevant information often remains unexploited due to the greater focus on the image recognition side, with various imaging devices and modalities, while omitting other potentially valuable clinical data. Therefore, our work investigates the performance of recent methods for the fusion of rich image data and heterogeneous tabular data. Those data may include patient demographics as well as laboratory data, e.g., prostate-specific antigen (PSA). Experiments on the large dataset (3800 subjects) indicated that when using the fusion method with demographic data in clinically significant prostate cancer (csPCa) detection tasks, the mean area under the receiver operating characteristic curve (ROC AUC) has improved significantly from 0.736 to 0.765. We also observed that the naïve concatenation performs similarly or even better than the state-of-the-art fusion modules. We also achieved better prediction quality in grading prostate disease by including more samples from longitudinal PSA profiles in the tabular feature set. Thus, by including the three last PSA samples per patient, the best-performing model has reached AUC of 0.794 and a quadratic weighted kappa score (QWK) of 0.464, which constitutes a significant improvement compared with the image-only method, with ROC AUC of 0.736 and QWK of 0.342.},
  note      = {ISSN: 2640-3498},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2023

Bashkanov, O; Rak, M; Meyer, A; Engelage, L; Lumiani, A; Muschter, R; Hansen, C
Automatic detection of prostate cancer grades and chronic prostatitis in biparametric MRI Journal Article
In: Computer Methods and Programs in Biomedicine, vol. 239, pp. 107624, 2023, ISSN: 0169-2607.
@article{bashkanov_automatic_2023,
  title     = {Automatic detection of prostate cancer grades and chronic prostatitis in biparametric {MRI}},
  author    = {Bashkanov, O. and Rak, M. and Meyer, A. and Engelage, L. and Lumiani, A. and Muschter, R. and Hansen, C.},
  url       = {https://www.sciencedirect.com/science/article/pii/S0169260723002894},
  doi       = {10.1016/j.cmpb.2023.107624},
  issn      = {0169-2607},
  year      = {2023},
  date      = {2023-09-01},
  urldate   = {2023-09-01},
  journal   = {Computer Methods and Programs in Biomedicine},
  volume    = {239},
  pages     = {107624},
  abstract  = {Background and objective: With emerging evidence to improve prostate cancer (PCa) screening, multiparametric magnetic prostate imaging is becoming an essential noninvasive component of the diagnostic routine. Computer-aided diagnostic (CAD) tools powered by deep learning can help radiologists interpret multiple volumetric images. In this work, our objective was to examine promising methods recently proposed in the multigrade prostate cancer detection task and to suggest practical considerations regarding model training in this context. Methods: We collected 1647 fine-grained biopsy-confirmed findings, including Gleason scores and prostatitis, to form a training dataset. In our experimental framework for lesion detection, all models utilized 3D nnU-Net architecture that accounts for anisotropy in the MRI data. First, we explore an optimal range of b-values for diffusion-weighted imaging (DWI) modality and its effect on the detection of clinically significant prostate cancer (csPCa) and prostatitis using deep learning, as the optimal range is not yet clearly defined in this domain. Next, we propose a simulated multimodal shift as a data augmentation technique to compensate for the multimodal shift present in the data. Third, we study the effect of incorporating the prostatitis class alongside cancer-related findings at three different granularities of the prostate cancer class (coarse, medium, and fine) and its impact on the detection rate of the target csPCa. Furthermore, ordinal and one-hot encoded (OHE) output formulations were tested. Results: An optimal model configuration with fine class granularity (prostatitis included) and OHE has scored the lesion-wise partial Free-Response Receiver Operating Characteristic (FROC) area under the curve (AUC) of 1.94 (CI 95%: 1.76–2.11) and patient-wise ROC AUC of 0.874 (CI 95%: 0.793–0.938) in the detection of csPCa. Inclusion of the auxiliary prostatitis class has demonstrated a stable relative improvement in specificity at a false positive rate (FPR) of 1.0 per patient, with an increase of 3%, 7%, and 4% for coarse, medium, and fine class granularities. Conclusions: This paper examines several configurations for model training in the biparametric MRI setup and proposes optimal value ranges. It also shows that the fine-grained class configuration, including prostatitis, is beneficial for detecting csPCa. The ability to detect prostatitis in all low-risk cancer lesions suggests the potential to improve the quality of the early diagnosis of prostate diseases. It also implies an improved interpretability of the results by the radiologist.},
  pubstate  = {published},
  tppubtype = {article}
}

Gulamhussene, G; Rak, M; Bashkanov, O; Joeres, F; Omari, J; Pech, M; Hansen, C
Transfer-learning is a key ingredient to fast deep learning-based 4D liver MRI reconstruction Journal Article
In: Scientific Reports, vol. 13, no. 1, pp. 11227, 2023, ISSN: 2045-2322, (Publisher: Nature Publishing Group).
@article{gulamhussene_transfer-learning_2023,
  title     = {Transfer-learning is a key ingredient to fast deep learning-based {4D} liver {MRI} reconstruction},
  author    = {Gulamhussene, G. and Rak, M. and Bashkanov, O. and Joeres, F. and Omari, J. and Pech, M. and Hansen, C.},
  url       = {https://www.nature.com/articles/s41598-023-38073-1},
  doi       = {10.1038/s41598-023-38073-1},
  issn      = {2045-2322},
  year      = {2023},
  date      = {2023-07-01},
  urldate   = {2023-07-01},
  journal   = {Scientific Reports},
  volume    = {13},
  number    = {1},
  pages     = {11227},
  abstract  = {Time-resolved volumetric magnetic resonance imaging (4D MRI) could be used to address organ motion in image-guided interventions like tumor ablation. Current 4D reconstruction techniques are unsuitable for most interventional settings because they are limited to specific breathing phases, lack temporal/spatial resolution, and have long prior acquisitions or reconstruction times. Deep learning-based (DL) 4D MRI approaches promise to overcome these shortcomings but are sensitive to domain shift. This work shows that transfer learning (TL) combined with an ensembling strategy can help alleviate this key challenge. We evaluate four approaches: pre-trained models from the source domain, models directly trained from scratch on target domain data, models fine-tuned from a pre-trained model and an ensemble of fine-tuned models. For that the data base was split into 16 source and 4 target domain subjects. Comparing ensemble of fine-tuned models (N = 10) with directly learned models, we report significant improvements (P < 0.001) of the root mean squared error (RMSE) of up to 12% and the mean displacement (MDISP) of up to 17.5%. The smaller the target domain data amount, the larger the effect. This shows that TL + Ens significantly reduces beforehand acquisition time and improves reconstruction quality, rendering it a key component in making 4D MRI clinically feasible for the first time in the context of 4D organ motion models of the liver and beyond.},
  note      = {Publisher: Nature Publishing Group},
  pubstate  = {published},
  tppubtype = {article}
}

Gulamhussene, G; Bashkanov, O; Omari, J; Pech, M; Hansen, C; Rak, M
Using Training Samples as Transitive Information Bridges in Predicted 4D MRI Proceedings Article
In: Xue, Z; Antani, S; Zamzmi, G; Yang, F; Rajaraman, S; Huang, S; Linguraru, M; Liang, Z (Ed.): Medical Image Learning with Limited and Noisy Data, pp. 237–245, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-44917-8.
@inproceedings{gulamhussene_using_2023,
  title     = {Using Training Samples as Transitive Information Bridges in Predicted {4D} {MRI}},
  author    = {Gulamhussene, G. and Bashkanov, O. and Omari, J. and Pech, M. and Hansen, C. and Rak, M.},
  editor    = {Xue, Z. and Antani, S. and Zamzmi, G. and Yang, F. and Rajaraman, S. and Huang, S. and Linguraru, M. and Liang, Z.},
  doi       = {10.1007/978-3-031-44917-8_23},
  isbn      = {978-3-031-44917-8},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {Medical Image Learning with Limited and Noisy Data},
  pages     = {237--245},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  abstract  = {The lack of real-time techniques for monitoring respiratory motion impairs the development of guidance systems for image-guided interventions. Recent works show that U-Net based real-time 4D MRI prediction methods are promising, but prone to bad image quality when small training data sets and inputs with multiple MR contrast are used. To overcome this problem, we propose a more efficient use of the spare training data and re-utilize 2D training samples as a secondary input for construction of transitive information bridges between the navigator slice primary input and the data slice prediction. We thus remove the need for a separate 3D breath-hold MRI with different MR contrast as the secondary input. Results show that our novel construction leads to improved prediction quality with very sparse training data, with a significant decrease in root mean squared error (RMSE) from 0.3 to 0.27 (p < 2.2e-16).},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2022

Gulamhussene, G; Meyer, A; Rak, M; Bashkanov, O; Omari, J; Pech, M; Hansen, C
Predicting 4D liver MRI for MR-guided interventions Journal Article
In: Computerized Medical Imaging and Graphics, vol. 101, pp. 102122, 2022, ISSN: 0895-6111.
@article{gulamhussene_predicting_2022,
  title     = {Predicting {4D} liver {MRI} for {MR}-guided interventions},
  author    = {Gulamhussene, G. and Meyer, A. and Rak, M. and Bashkanov, O. and Omari, J. and Pech, M. and Hansen, C.},
  url       = {https://www.sciencedirect.com/science/article/pii/S0895611122000921},
  doi       = {10.1016/j.compmedimag.2022.102122},
  issn      = {0895-6111},
  year      = {2022},
  date      = {2022-10-01},
  urldate   = {2022-10-01},
  journal   = {Computerized Medical Imaging and Graphics},
  volume    = {101},
  pages     = {102122},
  abstract  = {Organ motion poses an unresolved challenge in image-guided interventions like radiation therapy, biopsies or tumor ablation. In the pursuit of solving this problem, the research field of time-resolved volumetric magnetic resonance imaging (4D MRI) has evolved. However, current techniques are unsuitable for most interventional settings because they lack sufficient temporal and/or spatial resolution or have long acquisition times. In this work, we propose a novel approach for real-time, high-resolution 4D MRI with large fields of view for MR-guided interventions. To this end, we propose a network-agnostic, end-to-end trainable, deep learning formulation that enables the prediction of a 4D liver MRI with respiratory states from a live 2D navigator MRI. Our method can be used in two ways: First, it can reconstruct high quality fast (near real-time) 4D MRI with high resolution (209×128×128 matrix size with isotropic 1.8mm voxel size and 0.6s/volume) given a dynamic interventional 2D navigator slice for guidance during an intervention. Second, it can be used for retrospective 4D reconstruction with a temporal resolution of below 0.2s/volume for motion analysis and use in radiation therapy. We report a mean target registration error (TRE) of 1.19±0.74mm, which is below voxel size. We compare our results with a state-of-the-art retrospective 4D MRI reconstruction. Visual evaluation shows comparable quality. We compare different network architectures within our formulation. We show that small training sizes with short acquisition times down to 2 min can already achieve promising results and 24 min are sufficient for high quality results. Because our method can be readily combined with earlier time reducing methods, acquisition time can be further decreased while also limiting quality loss. We show that an end-to-end, deep learning formulation is highly promising for 4D MRI reconstruction.},
  pubstate  = {published},
  tppubtype = {article}
}
2021

Meyer, A; Mehrtash, A; Rak, M; Bashkanov, O; Langbein, B; Ziaei, A; Kibel, A; Tempany, C; Hansen, C; Tokuda, J
Domain adaptation for segmentation of critical structures for prostate cancer therapy Journal Article
In: Scientific Reports, vol. 11, no. 1, pp. 11480, 2021, ISSN: 2045-2322.
@article{meyer_domain_2021,
  title     = {Domain adaptation for segmentation of critical structures for prostate cancer therapy},
  author    = {Meyer, A. and Mehrtash, A. and Rak, M. and Bashkanov, O. and Langbein, B. and Ziaei, A. and Kibel, A. and Tempany, C. and Hansen, C. and Tokuda, J.},
  url       = {https://www.nature.com/articles/s41598-021-90294-4},
  doi       = {10.1038/s41598-021-90294-4},
  issn      = {2045-2322},
  year      = {2021},
  date      = {2021-06-01},
  urldate   = {2021-06-01},
  journal   = {Scientific Reports},
  volume    = {11},
  number    = {1},
  pages     = {11480},
  abstract  = {Preoperative assessment of the proximity of critical structures to the tumors is crucial in avoiding unnecessary damage during prostate cancer treatment. A patient-specific 3D anatomical model of those structures, namely the neurovascular bundles (NVB) and the external urethral sphincters (EUS), can enable physicians to perform such assessments intuitively. As a crucial step to generate a patient-specific anatomical model from preoperative MRI in a clinical routine, we propose a multi-class automatic segmentation based on an anisotropic convolutional network. Our specific challenge is to train the network model on a unique source dataset only available at a single clinical site and deploy it to another target site without sharing the original images or labels. As network models trained on data from a single source suffer from quality loss due to the domain shift, we propose a semi-supervised domain adaptation (DA) method to refine the model’s performance in the target domain. Our DA method combines transfer learning and uncertainty guided self-learning based on deep ensembles. Experiments on the segmentation of the prostate, NVB, and EUS, show significant performance gain with the combination of those techniques compared to pure TL and the combination of TL with simple self-learning (p < 0.005 for all structures using a Wilcoxon’s signed-rank test). Results on a different task and data (Pancreas CT segmentation) demonstrate our method’s generic application capabilities. Our method has the advantage that it does not require any further data from the source domain, unlike the majority of recent domain adaptation strategies. This makes our method suitable for clinical applications, where the sharing of patient data is restricted.},
  pubstate  = {published},
  tppubtype = {article}
}

Bashkanov, O; Meyer, A; Schindele, D; Schostak, M; Tönnies, K; Hansen, C; Rak, M
Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence Proceedings Article
In: 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1817–1821, 2021, (ISSN: 1945-8452).
@inproceedings{bashkanov_learning_2021,
  title     = {Learning Multi-Modal Volumetric Prostate Registration With Weak Inter-Subject Spatial Correspondence},
  author    = {Bashkanov, O. and Meyer, A. and Schindele, D. and Schostak, M. and Tönnies, K. and Hansen, C. and Rak, M.},
  url       = {https://ieeexplore.ieee.org/abstract/document/9433848},
  doi       = {10.1109/ISBI48211.2021.9433848},
  year      = {2021},
  date      = {2021-04-01},
  urldate   = {2021-04-01},
  booktitle = {2021 {IEEE} 18th International Symposium on Biomedical Imaging ({ISBI})},
  pages     = {1817--1821},
  abstract  = {Recent studies demonstrated the eligibility of convolutional neural networks (CNNs) for solving the image registration problem. CNNs enable faster transformation estimation and greater generalization capability needed for better support during medical interventions. Conventional fully-supervised training requires a lot of high-quality ground truth data such as voxel-to-voxel transformations, which typically are attained in a too tedious and error-prone manner. In our work, we use weakly-supervised learning, which optimizes the model indirectly only via segmentation masks that are a more accessible ground truth than the deformation fields. Concerning the weak supervision, we investigate two segmentation similarity measures: multiscale Dice similarity coefficient (mDSC) and the similarity between segmentation-derived signed distance maps (SDMs). We show that the combination of mDSC and SDM similarity measures results in a more accurate and natural transformation pattern together with a stronger gradient coverage. Furthermore, we introduce an auxiliary input to the neural network for the prior information about the prostate location in the MR sequence, which mostly is available preoperatively. This approach significantly outperforms the standard two-input models. With weakly labelled MR-TRUS prostate data, we showed registration quality comparable to the state-of-the-art deep learning-based method.},
  note      = {ISSN: 1945-8452},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2019

Bashkanov, O; Saalfeld, P; Gunasekaran, H; Jabaraj, M; Preim, B; Huber, T; Huttl, F; Kneist, W; Hansen, C
VR Multi-user Conference Room for Surgery Planning Journal Article
In: 2019.
@article{bashkanov_vr_2019,
  title     = {{VR} Multi-user Conference Room for Surgery Planning},
  author    = {Bashkanov, O. and Saalfeld, P. and Gunasekaran, H. and Jabaraj, M. and Preim, B. and Huber, T. and Huttl, F. and Kneist, W. and Hansen, C.},
  year      = {2019},
  date      = {2019-01-01},
  abstract  = {Preoperative planning is a fundamental precondition for the success of the surgery. In the course of planning, the appropriate decision making must take into account the individual anatomical characteristics of the organs and the patient's physical condition. Virtual reality (VR) based systems enable interaction with 3D organ models, which allows surgeons to mentally reconstruct the patient-specific organ structure more easily. Furthermore, the importance of proper team interaction and collaboration among surgeons must not be underestimated. In this work, we present the prototype for a multi-user conference room for surgery planning inside VR, where users can benefit from interaction with 3D organ models as well as 2D gray-value images. This system also enables the discussion of the surgical problems over distance. We chose liver surgery planning for evaluation purposes, but this prototype is also functional for planning other surgical procedures. A pilot study showed that surgeons found this tool helpful in preoperative planning routines. They suggest enhancements relating to avatar appearance and advanced 3D model interaction.},
  pubstate  = {published},
  tppubtype = {article}
}