@inproceedings{ivacic-etal-2023-analysis,
    title = "Analysis of Transfer Learning for Named Entity Recognition in {S}outh-{S}lavic Languages",
    author = "Iva{\v{c}}i{\v{c}}, Nikola  and
      Tran, Thi Hong Hanh  and
      Koloski, Boshko  and
      Pollak, Senja  and
      Purver, Matthew",
    editor = "Piskorski, Jakub  and
      Marci{\'n}czuk, Micha{\l}  and
      Nakov, Preslav  and
      Ogrodniczuk, Maciej  and
      Pollak, Senja  and
      P{\v{r}}ib{\'a}{\v{n}}, Pavel  and
      Rybak, Piotr  and
      Steinberger, Josef  and
      Yangarber, Roman",
    booktitle = "Proceedings of the 9th Workshop on Slavic Natural Language Processing 2023 (SlavicNLP 2023)",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.bsnlp-1.13",
    doi = "10.18653/v1/2023.bsnlp-1.13",
    pages = "106--112",
    abstract = "This paper analyzes a Named Entity Recognition task for South-Slavic languages using the pre-trained multilingual neural network models. We investigate whether the performance of the models for a target language can be improved by using data from closely related languages. We have shown that the model performance is not influenced substantially when trained with other than a target language. While for Slovene, the monolingual setting generally performs better, for Croatian and Serbian the results are slightly better in selected cross-lingual settings, but the improvements are not large. The most significant performance improvement is shown for the Serbian language, which has the smallest corpora. Therefore, fine-tuning with other closely related languages may benefit only the {``}low resource{''} languages.",
}