Short biography
Martin Wessel is a PhD student at the Professorship of Computational Social Science and at the TUM School of Computation, Information and Technology in Munich. He holds a Bachelor’s in Philosophy & Economics (University of Bayreuth, 2020) and a Master’s in Social and Economic Data Science (University of Konstanz, 2023). His research focuses on using language models to detect media bias, evaluating and improving their robustness, and assessing their impact on media consumption.
Besides his research, Martin is a member of the management team at the Center for Digital Technology and Management (CDTM), a joint institution of both TUM and LMU Munich.
Contact
m.wessel@media-bias-research.org
References
2025
Hinterreiter, Smi; Wessel, Martin; Schliski, Fabian; Echizen, Isao; Latoschik, Marc Erich; Spinde, Timo
NewsUnfold: Creating a News-Reading Application That Indicates Linguistic Media Bias and Collects Feedback Proceedings Article Forthcoming
In: Proceedings of the International AAAI Conference on Web and Social Media (ICWSM'25), AAAI, Copenhagen, Denmark, Forthcoming.
Abstract | Links | BibTeX | Tags: crowdsourcing, HITL, linguistic bias, media bias, news bias
@inproceedings{Hinterreiter2025NewsUnfold,
  title         = {NewsUnfold: Creating a News-Reading Application That Indicates Linguistic Media Bias and Collects Feedback},
  author        = {Hinterreiter, Smi and Wessel, Martin and Schliski, Fabian and Echizen, Isao and Latoschik, Marc Erich and Spinde, Timo},
  url           = {https://media-bias-research.org/wp-content/uploads/2024/07/Preprint_ICWSM_25_NewsUnfold.pdf},
  year          = {2025},
  date          = {2025-06-01},
  urldate       = {2025-06-01},
  booktitle     = {Proceedings of the International {AAAI} Conference on Web and Social Media ({ICWSM}'25)},
  volume        = {19},
  publisher     = {AAAI},
  address       = {Copenhagen, Denmark},
  abstract      = {Media bias is a multifaceted problem, leading to one-sided views and impacting decision-making. A way to address digital media bias is to detect and indicate it automatically through machine-learning methods. However, such detection is limited due to the difficulty of obtaining reliable training data. Human-in-the-loop-based feedback mechanisms have proven an effective way to facilitate the data-gathering process. Therefore, we introduce and test feedback mechanisms for the media bias domain, which we then implement on NewsUnfold, a news-reading web application to collect reader feedback on machine-generated bias highlights within online news articles. Our approach augments dataset quality by significantly increasing inter-annotator agreement by 26.31\%},
  keywords      = {crowdsourcing, HITL, linguistic bias, media bias, news bias},
  pubstate      = {forthcoming},
  tppubtype     = {inproceedings},
  internal-note = {review: abstract appears truncated after "26.31" --- an unescaped percent sign likely swallowed the remainder during export. TODO: restore the rest of the abstract from the linked preprint.},
}
2024
Horych, Tomas; Wessel, Martin; Wahle, Jan Philip; Ruas, Terry; Wassmuth, Jerome; Greiner-Petter, Andre; Aizawa, Akiko; Gipp, Bela; Spinde, Timo
MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions Proceedings Article
In: Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation, 2024.
Abstract | Links | BibTeX | Tags: dataset, multi-task learning, Transfer learning
@inproceedings{Horych2024a,
  title     = {{MAGPIE}: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions},
  author    = {Horych, Tomas and Wessel, Martin and Wahle, Jan Philip and Ruas, Terry and Wassmuth, Jerome and Greiner-Petter, Andre and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},
  url       = {https://aclanthology.org/2024.lrec-main.952},
  year      = {2024},
  date      = {2024-02-01},
  urldate   = {2024-02-01},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation},
  abstract  = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},
  keywords  = {dataset, multi-task learning, Transfer learning},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Wessel, Martin; Horych, Tomas
Beyond the Surface: Spurious Cues in Automatic Media Bias Detection Proceedings Article
In: Chakravarthi, Paul Buitelaar Bharathi B Bharathi Raja (Ed.): Proceedings of the Fourth Workshop on Language Technology for Equality, Diversity, Inclusion, pp. 21–30, Association for Computational Linguistics, 2024.
Abstract | Links | BibTeX | Tags:
@inproceedings{nokey,
  title         = {Beyond the Surface: Spurious Cues in Automatic Media Bias Detection},
  author        = {Wessel, Martin and Horych, Tomas},
  editor        = {Chakravarthi, Bharathi Raja and B, Bharathi and Buitelaar, Paul},
  url           = {https://aclanthology.org/2024.ltedi-1.3},
  year          = {2024},
  date          = {2024-03-21},
  urldate       = {2024-03-21},
  booktitle     = {Proceedings of the Fourth Workshop on Language Technology for Equality, Diversity, Inclusion},
  pages         = {21--30},
  publisher     = {Association for Computational Linguistics},
  abstract      = {This study investigates the robustness and generalization of transformer-based models for automatic media bias detection. We explore the behavior of current bias classifiers by analyzing feature attributions and stress-testing with adversarial datasets. The findings reveal a disproportionate focus on rare but strongly connotated words, suggesting a rather superficial understanding of linguistic bias and challenges in contextual interpretation. This problem is further highlighted by inconsistent bias assessment when stress-tested with different entities and minorities. Enhancing automatic media bias detection models is critical to improving inclusivity in media, ensuring balanced and fair representation of diverse perspectives.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {review: placeholder key "nokey" kept to avoid breaking existing \cite commands --- rename to e.g. Wessel2024surface when convenient. Editor list was garbled into a single name; reconstructed order is a best guess --- verify against the ACL Anthology front matter.},
}
2023
Spinde, Timo; Richter, Elisabeth; Wessel, Martin; Kulshrestha, Juhi; Donnay, Karsten
What do Twitter comments tell about news article bias? Assessing the impact of news article bias on its perception on Twitter Journal Article
In: Online Social Networks and Media, vol. 37-38, pp. 100264, 2023, ISSN: 2468-6964.
Abstract | Links | BibTeX | Tags: Hate speech detection, media bias, Sentiment analysis, Transfer learning
@article{SPINDE2023100264,
  title     = {What do {Twitter} comments tell about news article bias? Assessing the impact of news article bias on its perception on {Twitter}},
  author    = {Spinde, Timo and Richter, Elisabeth and Wessel, Martin and Kulshrestha, Juhi and Donnay, Karsten},
  url       = {https://www.sciencedirect.com/science/article/pii/S246869642300023X},
  doi       = {10.1016/j.osnem.2023.100264},
  issn      = {2468-6964},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Online Social Networks and Media},
  volume    = {37-38},
  pages     = {100264},
  abstract  = {News stories circulating online, especially on social media platforms, are nowadays a primary source of information. Given the nature of social media, news no longer are just news, but they are embedded in the conversations of users interacting with them. This is particularly relevant for inaccurate information or even outright misinformation because user interaction has a crucial impact on whether information is uncritically disseminated or not. Biased coverage has been shown to affect personal decision-making. Still, it remains an open question whether users are aware of the biased reporting they encounter and how they react to it. The latter is particularly relevant given that user reactions help contextualize reporting for other users and can thus help mitigate but may also exacerbate the impact of biased media coverage. This paper approaches the question from a measurement point of view, examining whether reactions to news articles on Twitter can serve as bias indicators, i.e., whether how users comment on a given article relates to its actual level of bias. We first give an overview of research on media bias before discussing key concepts related to how individuals engage with online content, focusing on the sentiment (or valance) of comments and on outright hate speech. We then present the first dataset connecting reliable human-made media bias classifications of news articles with the reactions these articles received on Twitter. We call our dataset BAT - Bias And Twitter. BAT covers 2,800 (bias-rated) news articles from 255 English-speaking news outlets. Additionally, BAT includes 175,807 comments and retweets referring to the articles. Based on BAT, we conduct a multi-feature analysis to identify comment characteristics and analyze whether Twitter reactions correlate with an article's bias. First, we fine-tune and apply two XLNet-based classifiers for hate speech detection and sentiment analysis. Second, we relate the results of the classifiers to the article bias annotations within a multi-level regression. The results show that Twitter reactions to an article indicate its bias, and vice-versa. With a regression coefficient of 0.703 (p<0.01), we specifically present evidence that Twitter reactions to biased articles are significantly more hateful. Our analysis shows that the news outlet's individual stance reinforces the hate-bias relationship. In future work, we will extend the dataset and analysis, including additional concepts related to media bias.},
  keywords  = {Hate speech detection, media bias, Sentiment analysis, Transfer learning},
  pubstate  = {published},
  tppubtype = {article},
}
Wessel, Martin; Horych, Tomas; Ruas, Terry; Aizawa, Akiko; Gipp, Bela; Spinde, Timo
Introducing MBIB - the first Media Bias Identification Benchmark Task and Dataset Collection Proceedings Article
In: Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR ’23), ACM, New York, NY, USA, 2023, ISBN: 978-1-4503-9408-6/23/07.
Abstract | Links | BibTeX | Tags:
@inproceedings{Wessel2023,
  title     = {Introducing {MBIB} - the first Media Bias Identification Benchmark Task and Dataset Collection},
  author    = {Wessel, Martin and Horych, Tomas and Ruas, Terry and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},
  url       = {https://media-bias-research.org/wp-content/uploads/2023/04/Wessel2023Preprint.pdf},
  doi       = {10.1145/3539618.3591882},
  isbn      = {978-1-4503-9408-6/23/07},
  year      = {2023},
  date      = {2023-07-01},
  urldate   = {2023-07-01},
  booktitle = {Proceedings of the 46th International {ACM} {SIGIR} Conference on Research and Development in Information Retrieval ({SIGIR} '23)},
  publisher = {ACM},
  address   = {New York, NY, USA},
  abstract  = {Although media bias detection is a complex multi-task problem, there is, to date, no unified benchmark grouping these evaluation tasks. We introduce the Media Bias Identification Benchmark (MBIB), a comprehensive benchmark that groups different types of media bias (e.g., linguistic, cognitive, political) under a common framework to test how prospective detection techniques generalize. After reviewing 115 datasets, we select nine tasks and carefully propose 22 associated datasets for evaluating media bias detection techniques. We evaluate MBIB using state-of-the-art Transformer techniques (e.g., T5, BART). Our results suggest that while hate speech, racial bias, and gender bias are easier to detect, models struggle to handle certain bias types, e.g., cognitive and political bias. However, our results show that no single technique can outperform all the others significantly. We also find an uneven distribution of research interest and resource allocation to the individual tasks in media bias. A unified benchmark encourages the development of more robust systems and shifts the current paradigm in media bias detection evaluation towards solutions that tackle not one but multiple media bias types simultaneously.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
