Augmented Reality (AR) provides new ways for situated visualization and human-computer interaction in physical environments. Current evaluation procedures for AR applications rely primarily on questionnaires and interviews, providing qualitative means to assess usability and task solution strategies. Eye tracking extends these existing evaluation methodologies by providing indicators for visual attention to virtual and real elements in the environment. However, the analysis of viewing behavior, especially the comparison of multiple participants, is difficult to achieve in AR. Specifically, the definition of areas of interest (AOIs), which is often a prerequisite for such analysis, is cumbersome and tedious with existing approaches. To address this issue, we present a new visualization approach to define AOIs, label fixations, and investigate the resulting annotated scanpaths. Our approach utilizes automatic annotation of gaze on virtual objects and an image-based approach that also considers spatial context for the manual annotation of objects in the real world. Our results show that, with our approach, eye tracking data from AR scenes can be annotated and analyzed flexibly with respect to data aspects and annotation strategies.
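
The core labeling idea in the abstract, assigning each fixation to an AOI so that scanpaths from multiple participants become comparable label sequences, can be sketched in a few lines. The Python snippet below is a hypothetical illustration under assumed data structures (box-shaped AOIs in scene coordinates, fixations with 3D gaze hit points); it is not the authors' implementation, and the names AOI, Fixation, label_fixation, and annotate_scanpath are invented for this example.

# Hypothetical sketch of AOI-based fixation labeling; not the authors' implementation.
# Assumes axis-aligned box AOIs and fixations carrying a 3D gaze hit point.
from dataclasses import dataclass
from typing import Optional

@dataclass
class AOI:
    label: str
    min_corner: tuple[float, float, float]  # (x, y, z), assumed box geometry
    max_corner: tuple[float, float, float]

@dataclass
class Fixation:
    start_ms: float
    end_ms: float
    hit_point: tuple[float, float, float]  # where the gaze ray intersects the scene

def label_fixation(fix: Fixation, aois: list[AOI]) -> Optional[str]:
    """Return the label of the first AOI whose box contains the fixation's hit point."""
    x, y, z = fix.hit_point
    for aoi in aois:
        (x0, y0, z0), (x1, y1, z1) = aoi.min_corner, aoi.max_corner
        if x0 <= x <= x1 and y0 <= y <= y1 and z0 <= z <= z1:
            return aoi.label
    return None  # the fixation falls outside all defined AOIs

def annotate_scanpath(fixations: list[Fixation], aois: list[AOI]) -> list[Optional[str]]:
    """An annotated scanpath is the sequence of AOI labels over a participant's fixations."""
    return [label_fixation(f, aois) for f in fixations]

For virtual objects, such labels can be derived automatically because the object geometry is known to the AR system; for real-world objects, the image-based manual annotation described in the abstract fills the same role.
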
%0 Journal Article
%1 N2090D:2023
%A Öney, Seyda
%A Pathmanathan, Nelusa
%A Becher, Michael
%A Sedlmair, Michael
%A Weiskopf, Daniel
%A Kurzhals, Kuno
%D 2023
%E Bujack, Roxana
%E Archambault, Daniel
%E Schreck, Tobias
%I The Eurographics Association and John Wiley & Sons Ltd.
%J Computer Graphics Forum
%N 3
%P 373-384
%R 10.1111/cgf.14837
%T Visual Gaze Labeling for Augmented Reality Studies
%U https://diglib.eg.org/xmlui/handle/10.1111/cgf14837
%V 42
%X Augmented Reality (AR) provides new ways for situated visualization and human-computer interaction in physical environments. Current evaluation procedures for AR applications rely primarily on questionnaires and interviews, providing qualitative means to assess usability and task solution strategies. Eye tracking extends these existing evaluation methodologies by providing indicators for visual attention to virtual and real elements in the environment. However, the analysis of viewing behavior, especially the comparison of multiple participants, is difficult to achieve in AR. Specifically, the definition of areas of interest (AOIs), which is often a prerequisite for such analysis, is cumbersome and tedious with existing approaches. To address this issue, we present a new visualization approach to define AOIs, label fixations, and investigate the resulting annotated scanpaths. Our approach utilizes automatic annotation of gaze on virtual objects and an image-based approach that also considers spatial context for the manual annotation of objects in the real world. Our results show that, with our approach, eye tracking data from AR scenes can be annotated and analyzed flexibly with respect to data aspects and annotation strategies.
@article{N2090D:2023,
abstract = {Augmented Reality (AR) provides new ways for situated visualization and human-computer interaction in physical environments. Current evaluation procedures for AR applications rely primarily on questionnaires and interviews, providing qualitative means to assess usability and task solution strategies. Eye tracking extends these existing evaluation methodologies by providing indicators for visual attention to virtual and real elements in the environment. However, the analysis of viewing behavior, especially the comparison of multiple participants, is difficult to achieve in AR. Specifically, the definition of areas of interest (AOIs), which is often a prerequisite for such analysis, is cumbersome and tedious with existing approaches. To address this issue, we present a new visualization approach to define AOIs, label fixations, and investigate the resulting annotated scanpaths. Our approach utilizes automatic annotation of gaze on virtual objects and an image-based approach that also considers spatial context for the manual annotation of objects in the real world. Our results show that, with our approach, eye tracking data from AR scenes can be annotated and analyzed flexibly with respect to data aspects and annotation strategies.},
author = {Öney, Seyda and Pathmanathan, Nelusa and Becher, Michael and Sedlmair, Michael and Weiskopf, Daniel and Kurzhals, Kuno},
doi = {10.1111/cgf.14837},
editor = {Bujack, Roxana and Archambault, Daniel and Schreck, Tobias},
journal = {Computer Graphics Forum},
number = 3,
pages = {373-384},
publisher = {The Eurographics Association and John Wiley & Sons Ltd.},
title = {Visual Gaze Labeling for Augmented Reality Studies},
url = {https://diglib.eg.org/xmlui/handle/10.1111/cgf14837},
volume = 42,
year = 2023
}