@inproceedings{Horych2025,
title = {The Promises and Pitfalls of LLM Annotations in Dataset Labeling: A Case Study on Media Bias Detection},
author = {Tomas Horych and Christoph Mandl and Terry Ruas and Andre Greiner-Petter and Bela Gipp and Akiko Aizawa and Timo Spinde},
url = {https://media-bias-research.org/wp-content/uploads/2025/01/Horych2025.pdf},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Findings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: NAACL 2025},
publisher = {Association for Computational Linguistics},
address = {Albuquerque, New Mexico, USA},
abstract = {High annotation costs from hiring or crowd-sourcing complicate the creation of large, high-quality datasets needed for training reliable text classifiers. Recent research suggests using Large Language Models (LLMs) to automate the annotation process, reducing these costs while maintaining data quality. LLMs have shown promising results in annotating downstream tasks like hate speech detection and political framing. Building on the success in these areas, this study investigates whether LLMs are viable for annotating the complex task of media bias detection and whether a downstream media bias classifier can be trained on such data. We create Anno-lexical, the first large-scale dataset for media bias classification with over 48k synthetically annotated examples. Our classifier fine-tuned on it surpasses all of the annotator LLMs by 5-9% in Matthews Correlation Coefficient (MCC) and performs close to, or outperforms, the model trained on human-labeled data when evaluated on two media bias benchmark datasets (BABE and BASIL). This study demonstrates how our approach significantly reduces the cost of dataset creation in the media bias domain and, by extension, the development of classifiers, while our subsequent behavioral stress-testing reveals some of its current limitations and trade-offs.},
keywords = {dataset, lexical bias, LLMs, synthetic annotations},
pubstate = {published},
tppubtype = {inproceedings}
}