Despite its importance for assessing the effectiveness of communicating information visually, fine-grained recallability of information visualisations has so far not been studied quantitatively. In this work, we propose a question-answering paradigm to study visualisation recallability and present VisRecall, a novel dataset of 200 visualisations annotated with recallability scores crowd-sourced from 305 participants using 1,000 questions of five question types. Furthermore, we present the first computational method to predict the recallability of different visualisation elements, such as the title or specific data values. We report detailed analyses of our method on VisRecall and demonstrate that it outperforms several baselines on overall recallability as well as on FE-, F-, RV-, and U-questions. Our work makes fundamental contributions towards a new generation of methods to assist designers in optimising visualisations.
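How the crowd-sourced recallability scores are aggregated is not spelled out in the abstract. As an illustrative sketch only, and not the paper's confirmed definition, one natural per-question score is the fraction of participants who answer the question correctly after viewing the visualisation:

\[
\operatorname{recallability}(q) = \frac{1}{\lvert P_q \rvert} \sum_{p \in P_q} \mathbb{1}\!\left[ a_{p,q} = a^{*}_{q} \right]
\]

where $P_q$ denotes the set of participants who answered question $q$, $a_{p,q}$ is participant $p$'s answer, and $a^{*}_{q}$ is the ground-truth answer. All notation here is hypothetical and introduced purely for illustration.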
journal: IEEE Transactions on Visualization and Computer Graphics (TVCG)
number: 12
pages: 4995–5005
volume: 28
acknowledgements: Y. Wang was funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) – Project-ID 251654672 – TRR 161. M. Bâce was funded by the Swiss National Science Foundation (SNSF) through a Postdoc.Mobility Fellowship (grant number 214434) while at the University of Stuttgart. A. Bulling was funded by the European Research Council (ERC) under grant agreement 801708.
%0 Journal Article
%1 wang22_tvcg
%A Wang, Yao
%A Jiao, Chuhan
%A Bâce, Mihai
%A Bulling, Andreas
%D 2022
%J IEEE Transactions on Visualization and Computer Graphics (TVCG)
%K hcics vis
%N 12
%P 4995-5005
%R 10.1109/TVCG.2022.3198163
%T VisRecall: Quantifying Information Visualisation Recallability via Question Answering
%V 28
%X Despite its importance for assessing the effectiveness of communicating information visually, fine-grained recallability of information visualisations has so far not been studied quantitatively. In this work, we propose a question-answering paradigm to study visualisation recallability and present VisRecall, a novel dataset of 200 visualisations annotated with recallability scores crowd-sourced from 305 participants using 1,000 questions of five question types. Furthermore, we present the first computational method to predict the recallability of different visualisation elements, such as the title or specific data values. We report detailed analyses of our method on VisRecall and demonstrate that it outperforms several baselines on overall recallability as well as on FE-, F-, RV-, and U-questions. Our work makes fundamental contributions towards a new generation of methods to assist designers in optimising visualisations.
@article{wang22_tvcg,
abstract = {Despite its importance for assessing the effectiveness of communicating information visually, fine-grained recallability of information visualisations has so far not been studied quantitatively. In this work, we propose a question-answering paradigm to study visualisation recallability and present VisRecall, a novel dataset of 200 visualisations annotated with recallability scores crowd-sourced from 305 participants using 1,000 questions of five question types. Furthermore, we present the first computational method to predict the recallability of different visualisation elements, such as the title or specific data values. We report detailed analyses of our method on VisRecall and demonstrate that it outperforms several baselines on overall recallability as well as on FE-, F-, RV-, and U-questions. Our work makes fundamental contributions towards a new generation of methods to assist designers in optimising visualisations.},
acknowledgements = {Y. Wang was funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) – Project-ID 251654672 – TRR 161. M. Bâce was funded by the Swiss National Science Foundation (SNSF) through a Postdoc.Mobility Fellowship (grant number 214434) while at the University of Stuttgart. A. Bulling was funded by the European Research Council (ERC) under grant agreement 801708.},
author = {Wang, Yao and Jiao, Chuhan and Bâce, Mihai and Bulling, Andreas},
award = {Oral presentation},
dataset = {https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2826},
doi = {10.1109/TVCG.2022.3198163},
journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)},
keywords = {hcics vis},
note = {spotlight},
number = 12,
pages = {4995--5005},
supp = {Yes},
title = {VisRecall: Quantifying Information Visualisation Recallability via Question Answering},
volume = 28,
year = 2022
}
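A minimal LaTeX usage sketch for the record above, assuming it has been saved to a file named references.bib (a hypothetical filename); the citation key wang22_tvcg is taken from the entry itself, and standard BibTeX styles silently ignore non-standard fields such as acknowledgements, award, and supp:

\documentclass{article}
\begin{document}
% Cite the entry by its key from the @article record above.
VisRecall quantifies the recallability of information visualisations
via question answering~\cite{wang22_tvcg}.

\bibliographystyle{IEEEtran} % IEEE bibliography style, matching the TVCG venue
\bibliography{references}    % hypothetical references.bib holding the record
\end{document}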