We propose a novel method that leverages human fixations to visually decode the image a person has in mind into a photofit (facial composite). Our method combines three neural networks: an encoder, a scoring network, and a decoder. The encoder extracts image features and predicts a neural activation map for each face looked at by a human observer. The scoring network compares the human and neural attention and predicts a relevance score for each extracted image feature. Finally, the image features are aggregated into a single feature vector as a linear combination of all features weighted by their relevance, which the decoder decodes into the final photofit. We train the neural scoring network on a novel dataset containing gaze data of 19 participants looking at collages of synthetic faces. We show that our method significantly outperforms a mean baseline predictor, and a human study shows that the decoded photofits are visually plausible and close to the observer's mental image. Code and dataset are available upon request.
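To make the aggregation and decoding step concrete, below is a minimal sketch in PyTorch, assuming per-face feature vectors and relevance scores have already been produced by the encoder and the scoring network; the function name, the tensor shapes, the softmax normalisation of the relevance scores, and the decoder interface are illustrative assumptions, not the paper's implementation.

# Minimal sketch of relevance-weighted feature aggregation and decoding.
# Illustrative only: shapes, the softmax normalisation, and the decoder
# interface are assumptions, not the authors' implementation.
import torch
import torch.nn.functional as F

def aggregate_and_decode(features: torch.Tensor,
                         relevance: torch.Tensor,
                         decoder: torch.nn.Module) -> torch.Tensor:
    """Combine per-face features into one vector and decode it into a photofit.

    features:  (N, D) tensor, one feature vector per face the observer looked at
    relevance: (N,) tensor, one relevance score per feature from the scoring network
    decoder:   module mapping a (1, D) feature vector to an image tensor
    """
    weights = F.softmax(relevance, dim=0)                      # normalise scores to sum to 1
    aggregated = (weights.unsqueeze(1) * features).sum(dim=0)  # (D,) linear combination
    return decoder(aggregated.unsqueeze(0))                    # decoded photofit image

In the paper, the relevance scores come from comparing the observer's gaze-based attention with the encoder's predicted activation maps; here they are simply passed in as a precomputed vector to isolate the aggregation step.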
%0 Conference Paper
%1 strohm21_iccv
%A Strohm, Florian
%A Sood, Ekta
%A Mayer, Sven
%A Müller, Philipp
%A Bâce, Mihai
%A Bulling, Andreas
%B Proc. IEEE International Conference on Computer Vision (ICCV)
%D 2021
%K hcics vis
%P 245-254
%R 10.1109/ICCV48922.2021.00031
%T Neural Photofit: Gaze-based Mental Image Reconstruction
%X We propose a novel method that leverages human fixations to visually decode the image a person has in mind into a photofit (facial composite). Our method combines three neural networks: an encoder, a scoring network, and a decoder. The encoder extracts image features and predicts a neural activation map for each face looked at by a human observer. The scoring network compares the human and neural attention and predicts a relevance score for each extracted image feature. Finally, the image features are aggregated into a single feature vector as a linear combination of all features weighted by their relevance, which the decoder decodes into the final photofit. We train the neural scoring network on a novel dataset containing gaze data of 19 participants looking at collages of synthetic faces. We show that our method significantly outperforms a mean baseline predictor, and a human study shows that the decoded photofits are visually plausible and close to the observer's mental image. Code and dataset are available upon request.
@inproceedings{strohm21_iccv,
abstract = {We propose a novel method that leverages human fixations to visually decode the image a person has in mind into a photofit (facial composite). Our method combines three neural networks: an encoder, a scoring network, and a decoder. The encoder extracts image features and predicts a neural activation map for each face looked at by a human observer. The scoring network compares the human and neural attention and predicts a relevance score for each extracted image feature. Finally, the image features are aggregated into a single feature vector as a linear combination of all features weighted by their relevance, which the decoder decodes into the final photofit. We train the neural scoring network on a novel dataset containing gaze data of 19 participants looking at collages of synthetic faces. We show that our method significantly outperforms a mean baseline predictor, and a human study shows that the decoded photofits are visually plausible and close to the observer's mental image. Code and dataset are available upon request.},
author = {Strohm, Florian and Sood, Ekta and Mayer, Sven and Müller, Philipp and Bâce, Mihai and Bulling, Andreas},
biburl = {https://puma.ub.uni-stuttgart.de/bibtex/240ed70df7f061ca41de593192c555c0f/hcics},
booktitle = {Proc. IEEE International Conference on Computer Vision (ICCV)},
code = {Available upon request.},
dataset = {Available upon request.},
doi = {10.1109/ICCV48922.2021.00031},
keywords = {hcics vis},
note = {spotlight},
pages = {245--254},
title = {Neural Photofit: Gaze-based Mental Image Reconstruction},
year = 2021
}