@inproceedings{papay18:_addres_low_resour_scenar_charac_embed,
  title     = {Addressing Low-Resource Scenarios with Character-aware Embeddings},
  author    = {Papay, Sean and Padó, Sebastian and Vu, Ngoc Thang},
  booktitle = {Proceedings of the Second Workshop on Subword and Character Level Models},
  address   = {New Orleans, LA},
  year      = {2018},
  url       = {http://aclweb.org/anthology/W18-1204.pdf},
  abstract  = {Most modern approaches to computing word embeddings assume the availability of text corpora with billions of words. In this paper, we explore a setup where only corpora with millions of words are available, and many words in any new text are out of vocabulary. This setup is both of practical interest, modeling the situation for specific domains and low-resource languages, and of psycholinguistic interest, since it corresponds much more closely to the actual experiences and challenges of human language learning and use. We evaluate skip-gram word embeddings and two types of character-based embeddings on word relatedness prediction. On large corpora, performance of both model types is equal for frequent words, but character awareness already helps for infrequent words. Consistently, on small corpora, the character-based models perform overall better than skip-grams. The concatenation of different embeddings performs best on small corpora and robustly on large corpora.}
}