@comment{Conference paper, CHI EA '25 (Yokohama). NOTE(review): the original
  export carries author given names as initials only; expand to full given
  names when known so styles can abbreviate consistently. Both year and date
  are kept deliberately to match this file's exporter conventions
  (pubstate/tppubtype fields).}
@inproceedings{schwenderling_teach_2025,
  title     = {Teach Me Where to Look: Dual-task Attention Training in {Augmented Reality}},
  author    = {Schwenderling, L. and Schotte, M. and Joeres, F. and Heinrich, F. and Hanke, L. and Huettl, F. and Huber, T. and Hansen, C.},
  booktitle = {Proceedings of the Extended Abstracts of the {CHI} Conference on Human Factors in Computing Systems},
  year      = {2025},
  date      = {2025-04-01},
  urldate   = {2025-04-01},
  pages     = {1--8},
  publisher = {ACM},
  address   = {Yokohama, Japan},
  doi       = {10.1145/3706599.3720198},
  url       = {https://dl.acm.org/doi/10.1145/3706599.3720198},
  isbn      = {979-8-4007-1395-8},
  abstract  = {Regular eye contact is essential in medicine to recognize signs of pain. However, it is difficult to remember this during training as attention is tied up in learning. While augmented reality (AR) has shown promising results for medical education, there is no training for attention allocation yet. Therefore, three auditory and three visual attention guidance tools in AR are evaluated for their use in medical dual-task training settings. In expert reviews with six participants in human-computer interaction and medical didactics, advantages, disadvantages, and refinements for the cues were developed. For visual cues, an overt but less occluding cue was preferred for constant visibility of the primary task. A more diegetic cue design was proposed for the auditory cues to use a patient simulation as a reminder of the regular face glance. In general, several cues were found to be suitable for gaze guidance training, requiring only minor changes for improvement.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}