We curated a list of this year's publications, including links to social media, lab websites, and supplemental material. We have 58 full papers, 13 Late-Breaking Work (LBW) papers, one Doctoral Consortium paper, and one Student Game Competition entry, and we are leading five workshops. Two papers received a Best Paper Award, and four received an Honourable Mention.
Is your publication missing? Send us an email: contact@germanhci.de
CUI@CHI 2024: Building Trust in CUIs—From Design to Deployment
Smit Desai (School of Information Sciences, University of Illinois), Christina Ziying Wei (University of Toronto), Jaisie Sin (University of British Columbia), Mateusz Dubiel (University of Luxembourg), Nima Zargham (Digital Media Lab, University of Bremen), Shashank Ahire (Leibniz University Hannover), Martin Porcheron (Bold Insight, London), Anastasia Kuzminykh (University of Toronto), Minha Lee (Eindhoven University of Technology), Heloisa Candello (IBM Research), Joel E Fischer (Mixed Reality Laboratory, University of Nottingham), Cosmin Munteanu (University of Waterloo), Benjamin R. Cowan (University College Dublin)
@inproceedings{Desai2024Cuichi2024,
title = {CUI@CHI 2024: Building Trust in CUIs—From Design to Deployment},
author = {Smit Desai (School of Information Sciences, University of Illinois), Christina Ziying Wei (University of Toronto), Jaisie Sin (University of British Columbia), Mateusz Dubiel (University of Luxembourg), Nima Zargham (Digital Media Lab, University of Bremen), Shashank Ahire (Leibniz University Hannover), Martin Porcheron (Bold Insight, London), Anastasia Kuzminykh (University of Toronto), Minha Lee (Eindhoven University of Technology), Heloisa Candello (IBM Research), Joel E Fischer (Mixed Reality Laboratory, University of Nottingham), Cosmin Munteanu (University of Waterloo), Benjamin R. Cowan (University College Dublin)},
url = {https://www.uni-bremen.de/dmlab, website},
doi = {10.1145/3613905.3636287},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Conversational user interfaces (CUIs) have become an everyday technology for people the world over, as well as a booming area of research. Advances in voice synthesis and the emergence of chatbots powered by large language models (LLMs), notably ChatGPT, have pushed CUIs to the forefront of human-computer interaction (HCI) research and practice. Now that these technologies enable an elemental level of usability and user experience (UX), we must turn our attention to higher-order human factors: trust and reliance. In this workshop, we aim to bring together a multidisciplinary group of researchers and practitioners invested in the next phase of CUI design. Through keynotes, presentations, and breakout sessions, we will share our knowledge, identify cutting-edge resources, and fortify an international network of CUI scholars. In particular, we will engage with the complexity of trust and reliance as attitudes and behaviours that emerge when people interact with conversational agents.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
Evaluating Interactive AI: Understanding and Controlling Placebo Effects in Human-AI Interaction
Steeven Villa (LMU Munich), Robin Welsch (Aalto University), Alena Denisova (University of York), Thomas Kosch (HU Berlin)
@inproceedings{Villa2024EvaluatingInteractive,
title = {Evaluating Interactive AI: Understanding and Controlling Placebo Effects in Human-AI Interaction},
author = {Steeven Villa (LMU Munich), Robin Welsch (Aalto University), Alena Denisova (University of York), Thomas Kosch (HU Berlin)},
url = {https://www.hcistudio.org, website},
doi = {10.1145/3613905.3636304},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {In the medical field, patients often experience tangible benefits from treatments they expect will improve their condition, even if the treatment has no mechanism of effect. This phenomenon, which often obscures the scientific evaluation of treatments, is termed the "placebo effect." Recent research in human-computer interaction has shown that using cutting-edge technologies similarly raises expectations of improvement, culminating in placebo effects that undermine evaluation efforts in user studies. This workshop delves into the role of placebo effects in human-computer interaction with cutting-edge technologies such as artificial intelligence and their influence as a confounding factor in user studies, and identifies methods that researchers can adopt to reduce their impact on study findings. By the end of this workshop, attendees will be equipped to incorporate placebo control measures in their experimental designs.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
Human-Centered Explainable AI (HCXAI): Reloading Explainability in the Era of Large Language Models (LLMs)
Upol Ehsan (Georgia Institute of Technology), Elizabeth A Watkins (Intel Labs), Philipp Wintersberger (University of Applied Sciences Upper Austria), Carina Manger (Technische Hochschule Ingolstadt), Sunnie S.Y. Kim (Princeton University), Niels van Berkel (Aalborg University), Andreas Riener (Technische Hochschule Ingolstadt), Mark O Riedl (Georgia Institute of Technology)
@inproceedings{Ehsan2024HumancenteredExplainable,
title = {Human-Centered Explainable AI (HCXAI): Reloading Explainability in the Era of Large Language Models (LLMs)},
author = {Upol Ehsan (Georgia Institute of Technology), Elizabeth A Watkins (Intel Labs), Philipp Wintersberger (University of Applied Sciences Upper Austria), Carina Manger (Technische Hochschule Ingolstadt), Sunnie S.Y. Kim (Princeton University), Niels van Berkel (Aalborg University), Andreas Riener (Technische Hochschule Ingolstadt), Mark O Riedl (Georgia Institute of Technology)},
url = {https://hcig.thi.de/, website},
doi = {10.1145/3613905.3636311},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Human-centered XAI (HCXAI) advocates that algorithmic transparency alone is not sufficient for making AI explainable. Explainability of AI is more than just “opening” the black box — who opens it matters just as much, if not more, as the ways of opening it. In the era of Large Language Models (LLMs), is “opening the black box” still a realistic goal for XAI? In this fourth CHI workshop on Human-centered XAI (HCXAI), we build on the maturation through the previous three installments to craft the coming-of-age story of HCXAI in the era of Large Language Models (LLMs). We aim towards actionable interventions that recognize both affordances and pitfalls of XAI. The goal of the fourth installment is to question how XAI assumptions fare in the era of LLMs and examine how human-centered perspectives can be operationalized at the conceptual, methodological, and technical levels. Encouraging holistic (historical, sociological, and technical) approaches, we emphasize “operationalizing.” We seek actionable analysis frameworks, concrete design guidelines, transferable evaluation methods, and principles for accountability.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}

Learning from Cycling: Discovering Lessons Learned from CyclingHCI
Andrii Matviienko (KTH Royal Institute of Technology), Mario Boot (University of Twente), Andreas Löcken (Technische Hochschule Ingolstadt), Bastian Pfleging (TU Bergakademie Freiberg), Markus Löchtefeld (Aalborg University), Tamara von Sawitzky (Technische Hochschule Ingolstadt), Gian-Luca Savino (University of St. Gallen), Miriam Sturdee (University of St. Andrews), Josh Andres (The Australian National University), Kristy Elizabeth Boyer (University of Florida), Stephen Brewster (University of Glasgow), Florian 'Floyd' Mueller (Monash University)
@inproceedings{Matviienko2024LearningFrom,
title = {Learning from Cycling: Discovering Lessons Learned from CyclingHCI},
author = {Andrii Matviienko (KTH Royal Institute of Technology), Mario Boot (University of Twente), Andreas Löcken (Technische Hochschule Ingolstadt), Bastian Pfleging (TU Bergakademie Freiberg), Markus Löchtefeld (Aalborg University), Tamara von Sawitzky (Technische Hochschule Ingolstadt), Gian-Luca Savino (University of St. Gallen), Miriam Sturdee (University of St. Andrews), Josh Andres (The Australian National University), Kristy Elizabeth Boyer (University of Florida), Stephen Brewster (University of Glasgow), Florian 'Floyd' Mueller (Monash University)},
url = {https://hcig.thi.de/, website},
doi = {10.1145/3613905.3636291},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Cycling plays an essential role in sustainable mobility, health, and socializing. This workshop aims to collect and discuss the lessons learned from Cycling Human-Computer Interaction (CyclingHCI). For this, we will gather researchers and experts in the field to discuss what we have learned from designing, building, and evaluating CyclingHCI systems. We will start the workshop with three lessons learned from CyclingHCI, defined by the organizers based on their experience in the field: (1) a lack of theories, tools, and perspectives, (2) knowledge about designing for safety and inclusive cycling, and (3) evaluation methods and environments. With this work, we aim to promote interactive technology that gets more people cycling, profiting from the many associated benefits.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
Mobilizing Research and Regulatory Action on Dark Patterns and Deceptive Design Practices
Colin M. Gray (Indiana University Bloomington), Johanna T. Gunawan (Northeastern University, Boston), René Schäfer (RWTH Aachen University), Nataliia Bielova (Inria Sophia Antipolis), Lorena Sanchez Chamorro (University of Luxembourg), Katie Seaborn (Tokyo Institute of Technology), Thomas Mildner (University of Bremen), Hauke Sandhaus (Cornell University)
@inproceedings{Gray2024MobilizingResearch,
title = {Mobilizing Research and Regulatory Action on Dark Patterns and Deceptive Design Practices},
author = {Colin M. Gray (Indiana University Bloomington), Johanna T. Gunawan (Northeastern University, Boston), René Schäfer (RWTH Aachen University), Nataliia Bielova (Inria Sophia Antipolis), Lorena Sanchez Chamorro (University of Luxembourg), Katie Seaborn (Tokyo Institute of Technology), Thomas Mildner (University of Bremen), Hauke Sandhaus (Cornell University)},
url = {https://www.uni-bremen.de/dmlab/, website},
doi = {10.1145/3613905.3636310},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Deceptive, manipulative, and coercive practices are deeply embedded in our digital experiences, impacting our ability to make informed choices and undermining our agency and autonomy. These design practices—collectively known as “dark patterns” or “deceptive patterns”—are increasingly under legal scrutiny and sanctions, largely due to the efforts of human-computer interaction scholars who have conducted pioneering research on dark pattern types, definitions, and harms. In this workshop, we continue building this scholarly community with a focus on organizing for action. Our aims include: (i) building capacity around specific research questions relating to methodologies for detection; (ii) characterizing harms; and (iii) creating effective countermeasures. Through the outcomes of the workshop, we will connect our scholarship to the legal, design, and regulatory communities to inform further legislative and legal action.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}