@inproceedings{nikolaev23:_argadj,
  author    = {Nikolaev, Dmitry and Padó, Sebastian},
  title     = {The argument--adjunct distinction in {BERT}: A {FrameNet}-based investigation},
  booktitle = {Proceedings of IWCS},
  address   = {Nancy, France},
  year      = {2023},
  url       = {https://aclanthology.org/2023.iwcs-1.23.pdf},
  abstract  = {The distinction between arguments and adjuncts is a fundamental assumption of several linguistic theories. In this study, we investigate to what extent this distinction is picked up by a Transformer-based language model. We use BERT as a case study, operationalizing arguments and adjuncts as core and non-core FrameNet frame elements, respectively, and tying them to activations of particular BERT neurons. We present evidence, from English and Korean, that BERT learns more dedicated representations for arguments than for adjuncts when fine-tuned on the FrameNet frame-identification task. We also show that this distinction is already present in a weaker form in the vanilla pre-trained model.}
}