We are in the process of curating a list of this year’s publications — including links to social media, lab websites, and supplemental material. Currently, we have 68 full papers, 23 LBWs, three Journal papers, one alt.chi paper, two SIGs, two Case Studies, one Interactivity, one Student Game Competition, and we lead three workshops. One paper received a best paper award and 13 papers received an honorable mention.
Disclaimer: This list is not complete yet; the DOIs might not be working yet.
Your publication from 2025 is missing? Please enter the details in this Google Form and send us an email that you added a publication: contact@germanhci.de
Lost in Moderation: How Commercial Content Moderation APIs Over- and Under-Moderate Group-Targeted Hate Speech and Linguistic Variations
David Hartmann (Weizenbaum Institute Berlin & Technical University Berlin), Amin Oueslati (Hertie School Berlin), Dimitri Staufer (Technical University Berlin), Lena Pohlmann (Weizenbaum Institute Berlin, Technical University Berlin), Simon Munzert (Hertie School Berlin), Hendrik Heuer (Center for Advanced Internet Studies (CAIS), University of Wuppertal, Wuppertal)
Abstract | Tags: Full Paper, Preventing Digital Harm | Links:
@inproceedings{Hartmann2025LostModeration,
  title               = {Lost in Moderation: How Commercial Content Moderation {APIs} Over- and Under-Moderate Group-Targeted Hate Speech and Linguistic Variations},
  author              = {Hartmann, David and Oueslati, Amin and Staufer, Dimitri and Pohlmann, Lena and Munzert, Simon and Heuer, Hendrik},
  booktitle           = {Proceedings of the 2025 {CHI} Conference on Human Factors in Computing Systems ({CHI} '25)},
  publisher           = {Association for Computing Machinery},
  url                 = {https://www.cais-research.de/forschungsprogramm-vertrauenswurdige-intelligenz/},
  url-lab-linkedin    = {https://www.linkedin.com/company/center-for-advanced-internet-studies/posts/?feedView=all},
  url-author-linkedin = {https://www.linkedin.com/in/hendrikheuer/},
  url-bluesky         = {https://bsky.app/profile/cais-research.bsky.social},
  doi                 = {10.1145/3706598.3713998},
  year                = {2025},
  date                = {2025-04-26},
  urldate             = {2025-04-26},
  abstract            = {Commercial content moderation APIs are marketed as scalable solutions to combat online hate speech. However, the reliance on these APIs risks both silencing legitimate speech, called over-moderation, and failing to protect online platforms from harmful speech, known as under-moderation. To assess such risks, this paper introduces a framework for auditing black-box NLP systems. Using the framework, we systematically evaluate five widely used commercial content moderation APIs. Analyzing five million queries based on four datasets, we find that APIs frequently rely on group identity terms, such as "black", to predict hate speech. While OpenAI's and Amazon's services perform slightly better, all providers under-moderate implicit hate speech, which uses codified messages, especially against LGBTQIA+ individuals. Simultaneously, they over-moderate counter-speech, reclaimed slurs and content related to Black, LGBTQIA+, Jewish, and Muslim people. We recommend that API providers offer better guidance on API implementation and threshold setting and more transparency on their APIs' limitations.},
  keywords            = {Full Paper, Preventing Digital Harm},
  pubstate            = {published},
  tppubtype           = {inproceedings}
}
Scrolling in the Deep: Analysing Contextual Influences on Intervention Effectiveness during Infinite Scrolling on Social Media
Luca-Maxim Meinhardt (Ulm University), Maryam Elhaidary (Ulm University), Mark Colley (Ulm University, UCL), Michael Rietzler (Ulm University), Jan Ole Rixen (Ulm University, KLT), Aditya Kumar Purohit (CAIS), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper, Preventing Digital Harm | Links:
@inproceedings{Meinhardt2025ScrollingDeep,
  title               = {Scrolling in the Deep: Analysing Contextual Influences on Intervention Effectiveness during Infinite Scrolling on Social Media},
  author              = {Meinhardt, Luca-Maxim and Elhaidary, Maryam and Colley, Mark and Rietzler, Michael and Rixen, Jan Ole and Purohit, Aditya Kumar and Rukzio, Enrico},
  booktitle           = {Proceedings of the 2025 {CHI} Conference on Human Factors in Computing Systems ({CHI} '25)},
  publisher           = {Association for Computing Machinery},
  url                 = {https://www.uni-ulm.de/en/in/mi/hci/},
  url-author-linkedin = {https://www.linkedin.com/in/luca-maximmeinhardt/},
  doi                 = {10.1145/3706598.3713187},
  year                = {2025},
  date                = {2025-04-26},
  urldate             = {2025-04-26},
  abstract            = {Infinite scrolling on social media platforms is designed to encourage prolonged engagement, leading users to spend more time than desired, which can provoke negative emotions. Interventions to mitigate infinite scrolling have shown initial success, yet users become desensitized due to the lack of contextual relevance. Understanding how contextual factors influence intervention effectiveness remains underexplored. We conducted a 7-day user study (N=72) investigating how these contextual factors affect users' reactance and responsiveness to interventions during infinite scrolling. Our study revealed an interplay, with contextual factors such as being at home, sleepiness, and valence playing significant roles in the intervention's effectiveness. Low valence coupled with being at home slows down the responsiveness to interventions, and sleepiness lowers reactance towards interventions, increasing user acceptance of the intervention. Overall, our work contributes to a deeper understanding of user responses toward interventions and paves the way for developing more effective interventions during infinite scrolling.},
  keywords            = {Full Paper, Preventing Digital Harm},
  pubstate            = {published},
  tppubtype           = {inproceedings}
}
Social Media for Activists: Reimagining Safety, Content Presentation, and Workflows
Anna Ricarda Luther (Institute for Information Management Bremen), Hendrik Heuer (Center for Advanced Internet Studies & University at Wuppertal), Stephanie Geise (Zentrum für Medien-, Kommunikations- und Informationsforschung & University of Bremen), Sebastian Haunss (SOCIUM Forschungszentrum Ungleichheit und Sozialpolitik , University of Bremen), Andreas Breiter (Institute for Information Management Bremen, University of Bremen)
Abstract | Tags: Full Paper, Preventing Digital Harm | Links:
@inproceedings{Luther2025SocialMedia,
  title               = {Social Media for Activists: Reimagining Safety, Content Presentation, and Workflows},
  author              = {Luther, Anna Ricarda and Heuer, Hendrik and Geise, Stephanie and Haunss, Sebastian and Breiter, Andreas},
  booktitle           = {Proceedings of the 2025 {CHI} Conference on Human Factors in Computing Systems ({CHI} '25)},
  publisher           = {Association for Computing Machinery},
  url                 = {https://www.cais-research.de/forschungsprogramm-vertrauenswurdige-intelligenz/},
  url-lab-linkedin    = {https://www.linkedin.com/company/center-for-advanced-internet-studies/posts/?feedView=all},
  url-author-linkedin = {https://www.linkedin.com/in/hendrikheuer/},
  url-bluesky         = {https://bsky.app/profile/cais-research.bsky.social},
  doi                 = {10.1145/3706598.3713351},
  year                = {2025},
  date                = {2025-04-26},
  urldate             = {2025-04-26},
  abstract            = {Social media is central to activists, who use it internally for coordination and externally to reach supporters and the public. To date, the HCI community has not explored activists' perspectives on future social media platforms. In interviews with 14 activists from an environmental and a queer-feminist movement in Germany, we identify activists' needs and feature requests for future social media platforms. The key finding is that on- and offline safety is their main need. Based on this, we make concrete proposals to improve safety measures. Increased control over content presentation and tools to streamline activist workflows are also central to activists. We make concrete design and research recommendations on how social media platforms and the HCI community can contribute to improved safety and content presentation, and how activists themselves can reduce their workload.},
  keywords            = {Full Paper, Preventing Digital Harm},
  pubstate            = {published},
  tppubtype           = {inproceedings}
}
Towards Youth-Sensitive Hateful Content Reporting: An Inclusive Focus Group Study in Germany
Julian Bäumler (PEASEC, TU Darmstadt), Helen Bader (PEASEC, TU Darmstadt), Marc-André Kaufhold (PEASEC, TU Darmstadt), Christian Reuter (PEASEC, TU Darmstadt)
Honorable Mention | Abstract | Tags: Full Paper, Honorable Mention, Preventing Digital Harm | Links:
@inproceedings{Baeumler2025TowardsYouthsensitive,
  title               = {Towards Youth-Sensitive Hateful Content Reporting: An Inclusive Focus Group Study in {Germany}},
  author              = {Bäumler, Julian and Bader, Helen and Kaufhold, Marc-André and Reuter, Christian},
  booktitle           = {Proceedings of the 2025 {CHI} Conference on Human Factors in Computing Systems ({CHI} '25)},
  publisher           = {Association for Computing Machinery},
  url                 = {https://peasec.de/},
  url-group-linkedin  = {https://www.linkedin.com/company/peasecde/},
  url-author-linkedin = {https://www.linkedin.com/in/julian-b%C3%A4umler-2667b8196/},
  url-bluesky         = {https://bsky.app/profile/peasec.de},
  doi                 = {10.1145/3706598.3713542},
  year                = {2025},
  date                = {2025-04-26},
  urldate             = {2025-04-26},
  abstract            = {Youth are particularly likely to encounter hateful internet content, which can severely impact their well-being. While most social media provide reporting mechanisms, in several countries, severe hateful content can alternatively be reported to law enforcement or dedicated reporting centers. However, in Germany, many youth never resort to reporting. While research in human-computer interaction has investigated adults' views on platform-based reporting, youth perspectives and platform-independent alternatives have received little attention. By involving a diverse group of 47 German adolescents and young adults in eight focus group interviews, we investigate how youth-sensitive reporting systems for hateful content can be designed. We explore German youth’s reporting barriers, finding that on platforms, they feel particularly discouraged by deficient rule enforcement and feedback, while platform-independent alternatives are rather unknown and perceived as time-consuming and disruptive. We further elicit their requirements for platform-independent reporting tools and contribute with heuristics for designing youth-sensitive and inclusive reporting systems.},
  keywords            = {Full Paper, Honorable Mention, Preventing Digital Harm},
  pubstate            = {published},
  tppubtype           = {inproceedings}
}