Resources
Most recent models are published on Huggingface (a minimal loading sketch follows the resource list below).
[Benchmark, GitHub] MBIB – the first Media Bias Identification Benchmark Task and Dataset Collection
[Dataset, Huggingface] Anno-lexical (Lexical bias)
[Dataset, GitHub] BABE – Bias Annotations By Experts
[Dataset, Paper] BAT – Bias And Twitter
[Scale/Questionnaire to measure bias perception] Do You Think It’s Biased? How To Ask For The Perception Of Media Bias (a set of tested questions for assessing media bias perception, suitable for use in any bias-related research)
[Dataset, Zenodo] MBIC – A Media Bias Annotation Dataset Including Annotator Characteristics
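Several of the resources above are distributed via the Hugging Face Hub and can be loaded with the datasets library. The following is a minimal sketch under that assumption; the repository identifier is a placeholder and should be replaced with the actual id linked above.

# Minimal sketch: loading a media-bias dataset from the Hugging Face Hub.
# Assumption: the `datasets` library is installed; the repository id below is a
# placeholder, not a confirmed repository name.
from datasets import load_dataset

dataset = load_dataset("ORGANIZATION/DATASET_NAME")  # substitute the real repository id

# Inspect the available splits and a first example (split names depend on the dataset).
print(dataset)
print(dataset["train"][0])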
Publications
2025
Horych, Tomas; Mandl, Christoph; Ruas, Terry; Greiner-Petter, Andre; Gipp, Bela; Aizawa, Akiko; Spinde, Timo
The Promises and Pitfalls of LLM Annotations in Dataset Labeling: a Case Study on Media Bias Detection Proceedings Article
In: Findings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: NAACL 2025, Association for Computational Linguistics, Albuquerque, USA, 2025.
Abstract | Links | BibTeX | Tags: dataset, lexical bias, LLMs, synthetic annotations
@inproceedings{Horych2025,
title = {The Promises and Pitfalls of LLM Annotations in Dataset Labeling: a Case Study on Media Bias Detection},
author = {Tomas Horych and Christoph Mandl and Terry Ruas and Andre Greiner-Petter and Bela Gipp and Akiko Aizawa and Timo Spinde},
url = {https://media-bias-research.org/wp-content/uploads/2025/01/Horych2025.pdf},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Findings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: NAACL 2025},
publisher = {Association for Computational Linguistics},
address = {Albuquerque, USA},
abstract = {High annotation costs from hiring or crowd-sourcing complicate the creation of large, high-quality datasets needed for training reliable text classifiers. Recent research suggests using Large Language Models (LLMs) to automate the annotation process, reducing these costs while maintaining data quality. LLMs have shown promising results in annotating downstream tasks like hate speech detection and political framing. Building on the success in these areas, this study investigates whether LLMs are viable for annotating a complex task of media bias detection and whether a downstream media bias classifier can be trained on such data. We create Anno-lexical, the first large-scale dataset for media bias classification with over 48k synthetically annotated examples.
Our classifier fine-tuned on it surpasses all of the annotator LLMs by 5-9% in Matthews Correlation Coefficient (MCC) and performs close to or outperforms the model trained on human-labeled data when evaluated on two media bias benchmark datasets (BABE and BASIL). This study demonstrates how our approach significantly reduces the cost of dataset creation in the media bias domain and, by extension, the development of the classifiers, while our subsequent behavioral stress-testing reveals some of its current limitations and trade-offs.},
keywords = {dataset, lexical bias, LLMs, synthetic annotations},
pubstate = {published},
tppubtype = {inproceedings}
}
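The abstract above reports classifier quality as Matthews Correlation Coefficient (MCC). As a quick reference, here is a minimal sketch of how such a score could be computed for a binary bias classifier with scikit-learn; the labels and predictions are illustrative only and do not reproduce any result from the paper.

# Minimal sketch: MCC for a binary media-bias classifier.
from sklearn.metrics import matthews_corrcoef

# Illustrative gold labels and predictions (1 = biased sentence, 0 = neutral).
# These values are made up for demonstration purposes.
y_true = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred = [1, 0, 1, 0, 0, 1, 1, 0]

# MCC ranges from -1 (total disagreement) through 0 (chance level) to +1 (perfect prediction).
print("MCC:", matthews_corrcoef(y_true, y_pred))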
2024
Horych, Tomas; Wessel, Martin; Wahle, Jan Philip; Ruas, Terry; Wassmuth, Jerome; Greiner-Petter, Andre; Aizawa, Akiko; Gipp, Bela; Spinde, Timo
MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions Proceedings Article
In: "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation", 2024.
Abstract | Links | BibTeX | Tags: dataset, multi-task learning, Transfer learning
@inproceedings{Horych2024a,
title = {MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions},
author = {Tomas Horych and Martin Wessel and Jan Philip Wahle and Terry Ruas and Jerome Wassmuth and Andre Greiner-Petter and Akiko Aizawa and Bela Gipp and Timo Spinde},
url = {https://aclanthology.org/2024.lrec-main.952},
year = {2024},
date = {2024-02-01},
urldate = {2024-02-01},
booktitle = {"Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation"},
abstract = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},
keywords = {dataset, multi-task learning, Transfer learning},
pubstate = {published},
tppubtype = {inproceedings}
}
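The MAGPIE abstract above describes multi-task learning with a shared RoBERTa encoder trained across many bias-related tasks. The sketch below illustrates that general pattern, not the authors' implementation: the class name, task inventory, and head design are assumptions made for illustration only.

# Minimal sketch of a shared-encoder multi-task setup (assumed design, not MAGPIE itself).
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer

class SharedEncoderMultiTask(nn.Module):
    # One shared RoBERTa encoder with one linear classification head per task.
    def __init__(self, task_num_labels, encoder_name="roberta-base"):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(encoder_name)
        hidden = self.encoder.config.hidden_size
        self.heads = nn.ModuleDict({
            task: nn.Linear(hidden, n_labels) for task, n_labels in task_num_labels.items()
        })

    def forward(self, task, input_ids, attention_mask):
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        cls = out.last_hidden_state[:, 0]   # representation of the <s> token
        return self.heads[task](cls)        # task-specific logits

# Hypothetical task inventory; the actual LBM collection comprises 59 bias-related tasks.
tasks = {"media_bias": 2, "sentiment": 3, "emotion": 6}
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = SharedEncoderMultiTask(tasks)

batch = tokenizer(["An illustrative input sentence."], return_tensors="pt")
logits = model("media_bias", batch["input_ids"], batch["attention_mask"])
print(logits.shape)  # (1, 2): one score per bias class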