Image-Text-Matching (ITM) is one of the defacto methods of learning generalized representations from a large corpus in Vision and Language (VL). However, due to the weak association between the web-collected image--text pairs, models fail to show fine-grained understanding of the combined semantics of these modalities. To this end, we propose Hard Negative Captions (HNC): an automatically created dataset containing foiled hard negative captions for ITM training towards achieving fine-grained cross-modal comprehension in VL. Additionally, we provide a challenging manually-created test set for benchmarking models on a fine-grained cross-modal mismatch with varying levels of compositional complexity. Our results show the effectiveness of training on HNC by improving the models' zero-shot capabilities in detecting mismatches on diagnostic tasks and performing robustly under noisy visual input scenarios. Also, we demonstrate that HNC models yield a comparable or better initialization for fine-tuning. Our code and data are publicly available.
%0 Conference Paper
%1 donmez-etal-2023-hnc
%A Dönmez, Esra
%A Tilli, Pascal
%A Yang, Hsiu-Yu
%A Vu, Ngoc Thang
%A Silberer, Carina
%B Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)
%C Singapore
%D 2023
%E Jiang, Jing
%E Reitter, David
%E Deng, Shumin
%I Association for Computational Linguistics
%K EXC2075 PN6-5 PN6-5(II) selected
%P 364--388
%R 10.18653/v1/2023.conll-1.24
%T HNC: Leveraging Hard Negative Captions towards Models with Fine-Grained Visual-Linguistic Comprehension Capabilities
%U https://aclanthology.org/2023.conll-1.24
%X Image-Text-Matching (ITM) is one of the defacto methods of learning generalized representations from a large corpus in Vision and Language (VL). However, due to the weak association between the web-collected image--text pairs, models fail to show fine-grained understanding of the combined semantics of these modalities. To this end, we propose Hard Negative Captions (HNC): an automatically created dataset containing foiled hard negative captions for ITM training towards achieving fine-grained cross-modal comprehension in VL. Additionally, we provide a challenging manually-created test set for benchmarking models on a fine-grained cross-modal mismatch with varying levels of compositional complexity. Our results show the effectiveness of training on HNC by improving the models' zero-shot capabilities in detecting mismatches on diagnostic tasks and performing robustly under noisy visual input scenarios. Also, we demonstrate that HNC models yield a comparable or better initialization for fine-tuning. Our code and data are publicly available.
@inproceedings{donmez-etal-2023-hnc,
  abstract  = {Image-Text-Matching (ITM) is one of the defacto methods of learning generalized representations from a large corpus in Vision and Language (VL). However, due to the weak association between the web-collected image{--}text pairs, models fail to show fine-grained understanding of the combined semantics of these modalities. To this end, we propose Hard Negative Captions (HNC): an automatically created dataset containing foiled hard negative captions for ITM training towards achieving fine-grained cross-modal comprehension in VL. Additionally, we provide a challenging manually-created test set for benchmarking models on a fine-grained cross-modal mismatch with varying levels of compositional complexity. Our results show the effectiveness of training on HNC by improving the models{'} zero-shot capabilities in detecting mismatches on diagnostic tasks and performing robustly under noisy visual input scenarios. Also, we demonstrate that HNC models yield a comparable or better initialization for fine-tuning. Our code and data are publicly available.},
  added-at  = {2025-01-27T13:09:00.000+0100},
  address   = {Singapore},
  author    = {D{\"o}nmez, Esra and Tilli, Pascal and Yang, Hsiu-Yu and Vu, Ngoc Thang and Silberer, Carina},
  biburl    = {https://puma.ub.uni-stuttgart.de/bibtex/25944e92567502f815dd268d2a8973b21/testusersimtech},
  booktitle = {Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
  doi       = {10.18653/v1/2023.conll-1.24},
  editor    = {Jiang, Jing and Reitter, David and Deng, Shumin},
  interhash = {c8bde8198bf201783db70355cc494958},
  intrahash = {5944e92567502f815dd268d2a8973b21},
  keywords  = {EXC2075 PN6-5 PN6-5(II) selected},
  month     = dec,
  pages     = {364--388},
  publisher = {Association for Computational Linguistics},
  timestamp = {2025-01-27T13:09:00.000+0100},
  title     = {{HNC}: Leveraging Hard Negative Captions towards Models with Fine-Grained Visual-Linguistic Comprehension Capabilities},
  url       = {https://aclanthology.org/2023.conll-1.24},
  year      = {2023}
}