We curated a list of this year’s publications — including links to social media, lab websites, and supplemental material. We have 58 full papers, 13 LBWs, one DC paper, and one Student Game Competition, and we lead five workshops. Two papers were awarded a best paper award, and four papers received an honourable mention.
Is your publication missing? Send us an email: contact@germanhci.de
‘We Do Not Have the Capacity to Monitor All Media’: A Design Case Study on Cyber Situational Awareness in Computer Emergency Response Teams
Marc-André Kaufhold (TU Darmstadt), Thea Riebe (TU Darmstadt), Markus Bayer (TU Darmstadt), Christian Reuter (TU Darmstadt)
In: 2024.
Abstract | Tags: Best Paper, Full Paper | Links:
@inproceedings{Kaufhold2024DoNot,
title = {‘We Do Not Have the Capacity to Monitor All Media’: A Design Case Study on Cyber Situational Awareness in Computer Emergency Response Teams},
author = {Kaufhold, Marc-André and Riebe, Thea and Bayer, Markus and Reuter, Christian},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://www.peasec.de, website},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Computer Emergency Response Teams (CERTs) have been established in the public sector globally to provide advisory, preventive and reactive cybersecurity services for government agencies, citizens, and businesses. Nevertheless, their responsibility of monitoring, analyzing, and communicating cyber threats and security vulnerabilities have become increasingly challenging due to the growing volume and varying quality of information disseminated through public and social channels. Based on a design case study conducted from 2021 to 2023, this paper combines three iterations of expert interviews (N=25), design workshops (N=4) and cognitive walkthroughs (N=25) to design an automated, cross-platform and real-time cybersecurity dashboard. By adopting the notion of cyber situational awareness, the study further extracts user requirements and design heuristics for enhanced threat intelligence and mission awareness in CERTs, discussing the aspects of source integration, data management, customizable visualization, relationship awareness, information assessment, software integration, (inter-)organizational collaboration, and communication of stakeholder warnings.},
keywords = {Best Paper, Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
"AI enhances our performance, I have no doubt this one will do the same": The Placebo effect is robust to negative descriptions of AI
Agnes Mercedes Kloft (Aalto University), Robin Welsch (Aalto University), Thomas Kosch (HU Berlin), Steeven Villa (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Kloft2024AiEnhances,
title = {"{AI} enhances our performance, I have no doubt this one will do the same": The Placebo effect is robust to negative descriptions of {AI}},
author = {Kloft, Agnes Mercedes and Welsch, Robin and Kosch, Thomas and Villa, Steeven},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
doi = {10.1145/3613904.3642633},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {effects. In a letter discrimination task, we informed participants that an AI would either increase or decrease their performance by adapting the interface, but in reality, no AI was present in any condition. A Bayesian analysis showed that participants had high expectations and performed descriptively better irrespective of the AI description when a sham-AI was present. Using cognitive modeling, we could trace this advantage back to participants gathering more information. A replication study verified that negative AI descriptions do not alter expectations, suggesting that performance expectations with AI are biased and robust to negative verbal descriptions. We discuss the impact of user expectations on AI interactions and evaluation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
"If the Machine Is As Good As Me, Then What Use Am I?" – How the Use of ChatGPT Changes Young Professionals' Perception of Productivity and Accomplishment
Charlotte Kobiella (Center for Digital Technology and Management (CDTM)), Yarhy Said Flores López (Center for Digital Technology and Management (CDTM)), Franz Waltenberger (Center for Digital Technology and Management (CDTM), Technical University of Munich), Fiona Draxler (University of Mannheim, LMU Munich), Albrecht Schmidt (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Kobiella2024IfMachine,
title = {"If the Machine Is As Good As Me, Then What Use Am I?" – How the Use of {ChatGPT} Changes Young Professionals' Perception of Productivity and Accomplishment},
author = {Kobiella, Charlotte and Flores López, Yarhy Said and Waltenberger, Franz and Draxler, Fiona and Schmidt, Albrecht},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {http://www.medien.ifi.lmu.de, website
https://twitter.com/mimuc, social media},
doi = {10.1145/3613904.3641964},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Large language models (LLMs) like ChatGPT have been widely adopted in work contexts. We explore the impact of ChatGPT on young professionals' perception of productivity and sense of accomplishment. We collected LLMs' main use cases in knowledge work through a preliminary study, which served as the basis for a two-week diary study with 21 young professionals reflecting on their ChatGPT use. Findings indicate that ChatGPT enhanced some participants' perceptions of productivity and accomplishment by enabling greater creative output and satisfaction from efficient tool utilization. Others experienced decreased perceived productivity and accomplishment, driven by a diminished sense of ownership, perceived lack of challenge, and mediocre results. We found that the suitability of task delegation to ChatGPT varies strongly depending on the task nature. It's especially suitable for comprehending broad subject domains, generating creative solutions, and uncovering new information. It's less suitable for research tasks due to hallucinations, which necessitate extensive validation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
“Tele” Me More: Using Telepresence Charades to Connect Strangers and Exhibits in Different Museums
Clara Sayffaerth (LMU Munich), Julian Rasch (LMU Munich), Florian Müller (LMU Munich)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Sayffaerth2024TeleMeMoreb,
title = {“Tele” Me More: Using Telepresence Charades to Connect Strangers and Exhibits in Different Museums},
author = {Sayffaerth, Clara and Rasch, Julian and Müller, Florian},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems (CHI EA '24)},
url = {http://www.medien.ifi.lmu.de, website
https://twitter.com/mimuc, twitter},
doi = {10.1145/3613905.3650834},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The museum is changing from a place of passive consumption to a place of interactive experiences, opening up new ways of engaging with exhibits and others. As a promising direction, this paper explores the potential of telepresence stations in the museum context to enhance social connectedness among visitors over distance. Emphasizing the significance of social exchange, our research focuses on studying telepresence to foster interactions between strangers, share knowledge, and promote social connectedness. To do so, we first observe exhibitions and then interview individual visitors of a technical museum about their experiences and needs. Based on the results, we design appropriate voiceless and touchless communication channels and test them in a study. The findings of our in-situ user study with 24 visitors unfamiliar with each other in the museum provide insights into behaviors and perceptions, contributing valuable knowledge on seamlessly integrating telepresence technology in exhibitions, with a focus on enhancing learning, social connections, and the museum experience in general.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
"I Know What You Mean": Context-Aware Recognition to Enhance Speech-Based Games
Nima Zargham (Digital Media Lab, University of Bremen), Mohamed Lamine Fetni (Digital Media Lab, University of Bremen), Laura Spillner (Digital Media Lab, University of Bremen), Thomas Münder (Digital Media Lab, University of Bremen), Rainer Malaka (Digital Media Lab, University of Bremen)
In: 2024.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{Zargham2024KnowWhat,
title = {"I Know What You Mean": Context-Aware Recognition to Enhance Speech-Based Games},
author = {Zargham, Nima and Fetni, Mohamed Lamine and Spillner, Laura and Münder, Thomas and Malaka, Rainer},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://www.uni-bremen.de/dmlab, website},
doi = {10.1145/3613904.3642426},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Recent advances in language processing and speech recognition open up a large opportunity for video game companies to embrace voice interaction as an intuitive feature and appealing game mechanics. However, speech-based systems still remain liable to recognition errors. These add a layer of challenge on top of the game's existing obstacles, preventing players from reaching their goals and thus often resulting in player frustration. This work investigates a novel method called context-aware speech recognition, where the game environment and actions are used as supplementary information to enhance recognition in a speech-based game. In a between-subject user study (N=40), we compared our proposed method with a standard method in which recognition is based only on the voice input without taking context into account. Our results indicate that our proposed method could improve the player experience and the usability of the speech system.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
3DA: Assessing 3D-Printed Electrodes for Measuring Electrodermal Activity
Martin Schmitz (Saarland University), Dominik Schön (Technical University of Darmstadt), Henning Klagemann (Technical University of Darmstadt), Thomas Kosch (HU Berlin)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Schmitz20243Da,
title = {{3DA}: Assessing {3D-Printed} Electrodes for Measuring Electrodermal Activity},
author = {Schmitz, Martin and Schön, Dominik and Klagemann, Henning and Kosch, Thomas},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems (CHI EA '24)},
url = {https://hcistudio.org, website},
doi = {10.1145/3613905.3650938},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Electrodermal activity (EDA) reflects changes in skin conductance, which are closely tied to human psychophysiological states. For example, EDA sensors can assess stress, cognitive workload, arousal, or other measures tied to the sympathetic nervous system for interactive human-centered applications. Yet, current limitations involve the complex attachment and proper skin contact with EDA sensors. This paper explores the concept of 3D printing electrodes for EDA measurements, integrating sensors into arbitrary 3D-printed objects, alleviating the need for complex assembly and attachment. We examine the adaptation of conventional EDA circuits for 3D-printed electrodes, assessing different electrode shapes and their impact on the sensing accuracy. A user study (N=6) revealed that 3D-printed electrodes can measure EDA with similar accuracy, suggesting larger contact areas for improved precision. We derive design implications to facilitate the integration of EDA sensors into 3D-printed devices to foster diverse integration into everyday objects for prototyping physiological interfaces.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
A Design Space for Intelligent and Interactive Writing Assistants
Mina Lee (Microsoft Research, United States), Katy Ilonka Gero (Harvard University, United States), John Joon Young Chung (Midjourney, United States), Simon Buckingham Shum (Connected Intelligence Centre, University of Technology Sydney, Australia), Vipul Raheja (Grammarly, United States), Hua Shen (University of Michigan, United States), Subhashini Venugopalan (Google, United States), Dr. Thiemo Wambsganss (Bern University of Applied Sciences, Switzerland), David Zhou (University of Illinois Urbana-Champaign, United States), Emad A. Alghamdi (King Abdulaziz University, Saudi Arabia), Tal August (University of Washington, United States), Avinash Bhat (McGill University, Canada), Madiha Zahrah (Cornell Tech, United States), Senjuti Dutta (University of Tennessee, United States), Jin L.C. Guo (McGill University, Canada), Md Naimul Hoque (University of Maryland, United States), Yewon Kim (KAIST, Republic of Korea), Simon Knight (University of Technology Sydney, Australia), Seyed Parsa Neshaei (EPFL, Switzerland), Dr Antonette Shibani (University of Technology Sydney, Australia), Disha Shrivastava (Google DeepMind, United Kingdom), Lila Shroff (Stanford University, United States), Agnia Sergeyuk (JetBrains Research, Serbia, Montenegro), Jessi Stark (University of Toronto, Canada), Sarah Sterman (University of Illinois, United States), Sitong Wang (Columbia University, United States), Antoine Bosselut (EPFL, Switzerland), Daniel Buschek (University of Bayreuth, Germany), Joseph Chee Chang (Allen Institute for AI, United States), Sherol Chen (Google, United States), Max Kreminski (Midjourney, United States), Joonsuk Park (University of Richmond, United States), Roy Pea (Stanford University, United States), Eugenia H Rho (Virginia Tech, United States), Zejiang Shen (Massachusetts Institute of Technology, United States), Pao Siangliulue (B12, United States)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Lee2024DesignSpace,
title = {A Design Space for Intelligent and Interactive Writing Assistants},
author = {Lee, Mina and Gero, Katy Ilonka and Chung, John Joon Young and Buckingham Shum, Simon and Raheja, Vipul and Shen, Hua and Venugopalan, Subhashini and Wambsganss, Thiemo and Zhou, David and Alghamdi, Emad A. and August, Tal and Bhat, Avinash and Zahrah, Madiha and Dutta, Senjuti and Guo, Jin L. C. and Hoque, Md Naimul and Kim, Yewon and Knight, Simon and Neshaei, Seyed Parsa and Shibani, Antonette and Shrivastava, Disha and Shroff, Lila and Sergeyuk, Agnia and Stark, Jessi and Sterman, Sarah and Wang, Sitong and Bosselut, Antoine and Buschek, Daniel and Chang, Joseph Chee and Chen, Sherol and Kreminski, Max and Park, Joonsuk and Pea, Roy and Rho, Eugenia H. and Shen, Zejiang and Siangliulue, Pao},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
doi = {10.1145/3613904.3642697},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {In our era of rapid technological advancement, the research landscape for writing assistants has become increasingly fragmented across various research communities. We seek to address this challenge by proposing a design space as a structured way to examine and explore the multidimensional space of intelligent and interactive writing assistants. Through community collaboration, we explore five aspects of writing assistants: task, user, technology, interaction, and ecosystem. Within each aspect, we define dimensions and codes by systematically reviewing 115 papers while leveraging the expertise of researchers in various disciplines. Our design space aims to offer researchers and designers a practical tool to navigate, comprehend, and compare the various possibilities of writing assistants, and aid in the design of new writing assistants.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
A Longitudinal In-the-Wild Investigation of Design Frictions to Prevent Smartphone Overuse
Luke Haliburton (LMU Munich), David J Grüning (Heidelberg University), Frederik Riedel (riedel.wtf GmbH), Albrecht Schmidt (LMU Munich), Nađa Terzimehić (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Haliburton2024LongitudinalInthewild,
title = {A Longitudinal In-the-Wild Investigation of Design Frictions to Prevent Smartphone Overuse},
author = {Haliburton, Luke and Grüning, David J. and Riedel, Frederik and Schmidt, Albrecht and Terzimehić, Nađa},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://www.medien.ifi.lmu.de/, website
https://twitter.com/mimuc, social media
https://www.instagram.com/mediagroup.lmu/, social media},
doi = {10.1145/3613904.3642370},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Smartphone overuse is hyper-prevalent in society, and developing tools to prevent this overuse has become a focus of HCI. However, there is a lack of work investigating smartphone overuse interventions over the long term. We collected usage data from N=1,039 users of one sec over an average of 13.4 weeks and qualitative insights from 249 of the users through an online survey. We found that users overwhelmingly choose to target Social Media apps. We found that the short design frictions introduced by one sec effectively reduce how often users attempt to open target apps and lead to more intentional app-openings over time. Additionally, we found that users take periodic breaks from one sec interventions, and quickly rebound from a pattern of overuse when returning from breaks. Overall, we contribute findings from a longitudinal investigation of design frictions in the wild and identify usage patterns from real users in practice.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
A Meta-Bayesian Approach for Rapid Online Parametric Optimization for Wrist-based Interactions
Yi-Chi Liao (Aalto University & Saarland University), Ruta Desai (Fundamental AI Research, Meta), Alec M. Pierce (Reality Labs Research, Meta), Krista Taylor (Reality Labs Research, Meta), Hrvoje Benko (Reality Labs Research, Meta), Tanya Jonker (Reality Labs Research, Meta), Aakar Gupta (Reality Labs Research, Meta & Fujitsu Research America)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Liao2024MetabayesianApproach,
title = {A Meta-Bayesian Approach for Rapid Online Parametric Optimization for Wrist-based Interactions},
author = {Liao, Yi-Chi and Desai, Ruta and Pierce, Alec M. and Taylor, Krista and Benko, Hrvoje and Jonker, Tanya and Gupta, Aakar},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://hci.cs.uni-saarland.de/, website
https://cix.cs.uni-saarland.de/, website},
doi = {10.1145/3613904.3642071},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
A Systematic Review of Ability-diverse Collaboration through Ability-based Lens in HCI
Lan Xiao (Global Disability Innovation Hub, University College London), Maryam Bandukda (Global Disability Innovation Hub, University College London), Katrin Angerbauer (VISUS, University of Stuttgart), Weiyue Lin (Peking University), Tigmanshu Bhatnagar (Global Disability Innovation Hub, University College London), Michael Sedlmair (VISUS, University of Stuttgart), Catherine Holloway (Global Disability Innovation Hub, University College London)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Xiao2024SystematicReview,
title = {A Systematic Review of Ability-diverse Collaboration through Ability-based Lens in {HCI}},
author = {Xiao, Lan and Bandukda, Maryam and Angerbauer, Katrin and Lin, Weiyue and Bhatnagar, Tigmanshu and Sedlmair, Michael and Holloway, Catherine},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
doi = {10.1145/3613904.3641930},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {In a world where diversity is increasingly recognised and celebrated, it is important for HCI to embrace the evolving methods and theories for technologies to reflect the diversity of its users and be ability-centric. Interdependence Theory, an example of this evolution, highlights the interpersonal relationships between humans and technologies and how technologies should be designed to meet shared goals and outcomes for people, regardless of their abilities. This necessitates a contemporary understanding of "ability-diverse collaboration," which motivated this review. In this review, we offer an analysis of 117 papers sourced from the ACM Digital Library spanning the last two decades. We contribute (1) a unified taxonomy and the Ability-Diverse Collaboration Framework, (2) a reflective discussion and mapping of the current design space, and (3) future research opportunities and challenges. Finally, we have released our data and analysis tool to encourage the HCI research community to contribute to this ongoing effort.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
An Ontology of Dark Patterns Knowledge: Foundations, Definitions, and a Pathway for Shared Knowledge-Building
Colin M. Gray (Indiana University, Bloomington), Cristiana Teixeira Santos (Utrecht University), Nataliia Bielova (Inria Sophia Antipolis), Thomas Mildner (University of Bremen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{gray2024ontology,
title = {An Ontology of Dark Patterns Knowledge: Foundations, Definitions, and a Pathway for Shared Knowledge-Building},
author = {Gray, Colin M. and Santos, Cristiana Teixeira and Bielova, Nataliia and Mildner, Thomas},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://www.uni-bremen.de/dmlab/, website},
doi = {10.1145/3613904.3642436},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Deceptive and coercive design practices are increasingly used by companies to extract profit, harvest data, and limit consumer choice. Dark patterns represent the most common contemporary amalgamation of these problematic practices, connecting designers, technologists, scholars, regulators, and legal professionals in transdisciplinary dialogue. However, a lack of universally accepted definitions across the academic, legislative, practitioner, and regulatory space has likely limited the impact that scholarship on dark patterns might have in supporting sanctions and evolved design practices. In this paper, we seek to support the development of a shared language of dark patterns, harmonizing ten existing regulatory and academic taxonomies of dark patterns and proposing a three-level ontology with standardized definitions for 64 synthesized dark pattern types across low-, meso-, and high-level patterns. We illustrate how this ontology can support translational research and regulatory action, including transdisciplinary pathways to extend our initial types through new empirical work across application and technology domains.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Assessing User Apprehensions About Mixed Reality Artifacts and Applications: The Mixed Reality Concerns (MRC) Questionnaire
Christopher Katins (HU Berlin), Paweł W. Woźniak (Chalmers University of Technology), Aodi Chen (HU Berlin), Ihsan Tumay (HU Berlin), Luu Viet Trinh Le (HU Berlin), John Uschold (HU Berlin), Thomas Kosch (HU Berlin)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Katins2024AssessingUser,
title = {Assessing User Apprehensions About Mixed Reality Artifacts and Applications: The Mixed Reality Concerns ({MRC}) Questionnaire},
author = {Katins, Christopher and Woźniak, Paweł W. and Chen, Aodi and Tumay, Ihsan and Le, Luu Viet Trinh and Uschold, John and Kosch, Thomas},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://hcistudio.org, website},
doi = {10.1145/3613904.3642631},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Current research in Mixed Reality (MR) presents a wide range of novel use cases for blending virtual elements with the real world. This yet-to-be-ubiquitous technology challenges how users currently work and interact with digital content. While offering many potential advantages, MR technologies introduce new security, safety, and privacy challenges. Thus, it is relevant to understand users' apprehensions towards MR technologies, ranging from security concerns to social acceptance. To address this challenge, we present the Mixed Reality Concerns (MRC) Questionnaire, designed to assess users' concerns towards MR artifacts and applications systematically. The development followed a structured process considering previous work, expert interviews, iterative refinements, and confirmatory tests to analytically validate the questionnaire. The MRC Questionnaire offers a new method of assessing users' critical opinions to compare and assess novel MR artifacts and applications regarding security, privacy, social implications, and trust.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmented Reality Cues Facilitate Task Resumption after Interruptions in Computer-Based and Physical Tasks
Kilian L. Bahnsen (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Lucas Tiemann (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Lucas Plabst (Chair for Human-Computer Interaction, Julius-Maximilians-Universität Würzburg), Tobias Grundgeiger (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Bahnsen2024AugmentedReality,
title = {Augmented Reality Cues Facilitate Task Resumption after Interruptions in Computer-Based and Physical Tasks},
author = {Bahnsen, Kilian L. and Tiemann, Lucas and Plabst, Lucas and Grundgeiger, Tobias},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://www.mcm.uni-wuerzburg.de/psyergo/, website},
doi = {10.1145/3613904.3642666},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Many work domains include numerous interruptions, which can contribute to errors. We investigated the potential of augmented reality (AR) cues to facilitate primary task resumption after interruptions of varying lengths. Experiment 1 (N = 83) involved a computer-based primary task with a red AR arrow at the to-be-resumed task step which was placed via a gesture by the participants or automatically. Compared to no cue, both cues significantly reduced the resumption lag (i.e., the time between the end of the interruption and the resumption of the primary task) following long but not short interruptions. Experiment 2 (N = 38) involved a tangible sorting task, utilizing only the automatic cue. The AR cue facilitated task resumption compared to not cue after both short and long interruptions. We demonstrated the potential of AR cues in mitigating the negative effects of interruptions and make suggestions for integrating AR technologies for task resumption.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Beyond the Blink: Investigating Combined Saccadic & Blink-Suppressed Hand Redirection in Virtual Reality
André Zenner (Saarland University & DFKI), Chiara Karr (Saarland University), Martin Feick (DFKI & Saarland University), Oscar Ariza (Universität Hamburg), Antonio Krüger (Saarland University & DFKI)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Zenner2024BeyondBlink,
title = {Beyond the Blink: Investigating Combined Saccadic \& Blink-Suppressed Hand Redirection in Virtual Reality},
author = {Zenner, André and Karr, Chiara and Feick, Martin and Ariza, Oscar and Krüger, Antonio},
booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI '24)},
url = {https://umtl.cs.uni-saarland.de/, website},
doi = {10.1145/3613904.3642073},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {In pursuit of hand redirection techniques that are ever more tailored to human perception, we propose the first algorithm for hand redirection in virtual reality that makes use of saccades, i.e., fast ballistic eye movements that are accompanied by the perceptual phenomenon of change blindness. Our technique combines the previously proposed approaches of gradual hand warping and blink-suppressed hand redirection with the novel approach of saccadic redirection in one unified yet simple algorithm. We compare three variants of the proposed Saccadic \& Blink-Suppressed Hand Redirection (SBHR) technique with the conventional approach to redirection in a psychophysical study (N=25). Our results highlight the great potential of our proposed technique for comfortable redirection by showing that SBHR allows for significantly greater magnitudes of unnoticeable redirection while being perceived as significantly less intrusive and less noticeable than commonly employed techniques that only use gradual hand warping.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Born to Run, Programmed to Play: Mapping the Extended Reality Exergames Landscape
Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Sebastian Cmentowski (HCI Games Group, Stratford School of Interaction Design, Business, University of Waterloo), Lennart E. Nacke (HCI Games Group, Stratford School of Interaction Design, Business, University of Waterloo), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Karaosmanoglu2024BornRun,
  title     = {Born to Run, Programmed to Play: Mapping the Extended Reality Exergames Landscape},
  author    = {Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Sebastian Cmentowski (HCI Games Group, Stratford School of Interaction Design and Business, University of Waterloo), Lennart E. Nacke (HCI Games Group, Stratford School of Interaction Design and Business, University of Waterloo), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)},
  url       = {https://www.inf.uni-hamburg.de/en/inst/ab/hci, website
https://twitter.com/uhhhci, social media},
  doi       = {10.1145/3613904.3642124},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Many people struggle to exercise regularly, raising the risk of serious health-related issues. Extended reality (XR) exergames address these hurdles by combining physical exercises with enjoyable, immersive gameplay. While a growing body of research explores XR exergames, no previous review has structured this rapidly expanding research landscape. We conducted a scoping review of the current state of XR exergame research to (i) provide a structured overview, (ii) highlight trends, and (iii) uncover knowledge gaps. After identifying 1318 papers in human-computer interaction and medical databases, we ultimately included 186 papers in our analysis. We provide a quantitative and qualitative summary of XR exergame research, showing current trends and potential future considerations. Finally, we provide a taxonomy of XR exergames to help future design and methodological investigation and reporting.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Closing the Loop: The Effects of Biofeedback Awareness on Physiological Stress Response Using Electrodermal Activity in Virtual Reality
Jessica Sehrt (Frankfurt University of Applied Sciences), Ugur Yilmaz (Frankfurt University of Applied Sciences), Thomas Kosch (HU Berlin), Valentin Schwind (Frankfurt University of Applied Sciences)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Sehrt2024ClosingLoop,
  title     = {Closing the Loop: The Effects of Biofeedback Awareness on Physiological Stress Response Using Electrodermal Activity in Virtual Reality},
  author    = {Jessica Sehrt (Frankfurt University of Applied Sciences), Ugur Yilmaz (Frankfurt University of Applied Sciences), Thomas Kosch (HU Berlin), Valentin Schwind (Frankfurt University of Applied Sciences)},
  doi       = {10.1145/3613905.3650830},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {This paper presents the results of a user study examining the impact of biofeedback awareness on the effectiveness of stress management, utilizing Electrodermal Activity (EDA) as the primary metric within an immersive Virtual Reality (VR). Employing a between-subjects design (N=30), we probed whether informing individuals of their capacity to manipulate the VR environment's weather impacts their physiological stress responses. Our results indicate lower EDA levels of participants who were informed of their biofeedback control than those participants who were not informed about their biofeedback control. Interestingly, the participants who were informed about the control over the environment also manifested variations in their EDA responses. Participants who were not informed of their ability to control the weather showed decreased EDA measures until the end of the biofeedback phase. This study enhances our comprehension of the significance of awareness in biofeedback in immersive settings and its potential to augment stress management techniques.},
  keywords  = {Late Breaking Work},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Controlling the Rooms: How People Prefer Using Gestures to Control Their Smart Homes
Masoumehsadat Hosseini (University of Oldenburg), Heiko Müller (University of Oldenburg), Susanne Boll (University of Oldenburg)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Hosseini2024ControllingRooms,
  title     = {Controlling the Rooms: How People Prefer Using Gestures to Control Their Smart Homes},
  author    = {Masoumehsadat Hosseini (University of Oldenburg), Heiko Müller (University of Oldenburg), Susanne Boll (University of Oldenburg)},
  url       = {https://hci.uni-oldenburg.de/, website
https://twitter.com/hcioldenburg, social media},
  doi       = {10.1145/3613904.3642687},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Gesture interactions have become ubiquitous, and with increasingly reliable sensing technology we can anticipate their use in everyday environments such as smart homes. Gestures must meet users' needs and constraints in diverse scenarios to gain widespread acceptance. Although mid-air gestures have been proposed in various user contexts, it is still unclear to what extent users want to integrate them into different scenarios in their smart homes, along with the motivations driving this desire. Furthermore, it is uncertain whether users will remain consistent in their suggestions when transitioning to alternative scenarios within a smart home.
This study contributes methodologically by adapting a bottom-up frame-based design process. We offer insights into preferred devices and commands in different smart home scenarios. Using our results, we can assist in designing gestures in the smart home that are consistent with individual needs across devices and scenarios, while maximizing the reuse and transferability of gestural knowledge.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cross-Country Examination of People’s Experience with Targeted Advertising on Social Media
Smirity Kaushik (School of Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Tanusree Sharma (Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Yaman Yu (School of Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Amna F Ali (UIUC, Champaign, Illinois, United States), Yang Wang (University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Yixin Zou (Max Planck Institute for Security and Privacy, Bochum, Germany)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Kaushik2024CrosscountryExamination,
  title     = {Cross-Country Examination of People’s Experience with Targeted Advertising on Social Media},
  author    = {Smirity Kaushik (School of Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Tanusree Sharma (Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Yaman Yu (School of Information Sciences, University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Amna F Ali (UIUC, Champaign, Illinois, United States), Yang Wang (University of Illinois at Urbana-Champaign, Champaign, Illinois, United States), Yixin Zou (Max Planck Institute for Security and Privacy, Bochum, Germany)},
  url       = {https://yixinzou.github.io/, website
https://youtu.be/aJ2xmuFk0DM, full video
https://twitter.com/yixinzouu, social media},
  doi       = {10.1145/3613905.3650780},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Social media effectively connects businesses with diverse audiences. However, research related to targeted advertising and social media is rarely done beyond Western contexts. Through an online survey with 412 participants in the United States and three South Asian countries (Bangladesh, India, and Pakistan), we found significant differences in participants' ad preferences, perceptions, and coping behaviors that correlate with individuals' country of origin, culture, religion, and other demographic factors. For instance, Indian and Pakistani participants preferred video ads to those in the US. Participants relying on themselves (horizontal individualism) also expressed more concerns about the security and privacy issues of targeted ads. Muslim participants were more likely to hide ads as a coping strategy than other religious groups. Our findings highlight that people's experiences with targeted advertising are rooted in their national, cultural, and religious backgrounds—an important lesson for the design of ad explanations and settings, user education, and platform governance.},
  keywords  = {Late Breaking Work},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
CUI@CHI 2024: Building Trust in CUIs—From Design to Deployment
Smit Desai (School of Information Sciences, University of Illinois), Christina Ziying Wei (University of Toronto), Jaisie Sin (University of British Columbia), Mateusz Dubiel (University of Luxembourg), Nima Zargham (Digital Media Lab, University of Bremen), Shashank Ahire (Leibniz University Hannover), Martin Porcheron (Bold Insight, London), Anastasia Kuzminykh (University of Toronto), Minha Lee (Eindhoven University of Technology), Heloisa Candello (IBM Research), Joel E Fischer (Mixed Reality Laboratory, University of Nottingham), Cosmin Munteanu (University of Waterloo), Benjamin R. Cowan (University College Dublin)
In: 2024.
Abstract | Tags: Workshop | Links:
@inproceedings{Desai2024Cuichi2024,
  title     = {CUI@CHI 2024: Building Trust in CUIs—From Design to Deployment},
  author    = {Smit Desai (School of Information Sciences, University of Illinois), Christina Ziying Wei (University of Toronto), Jaisie Sin (University of British Columbia), Mateusz Dubiel (University of Luxembourg), Nima Zargham (Digital Media Lab, University of Bremen), Shashank Ahire (Leibniz University Hannover), Martin Porcheron (Bold Insight, London), Anastasia Kuzminykh (University of Toronto), Minha Lee (Eindhoven University of Technology), Heloisa Candello (IBM Research), Joel E Fischer (Mixed Reality Laboratory, University of Nottingham), Cosmin Munteanu (University of Waterloo), Benjamin R. Cowan (University College Dublin)},
  url       = {https://www.uni-bremen.de/dmlab, website},
  doi       = {10.1145/3613905.3636287},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Conversational user interfaces (CUIs) have become an everyday technology for people the world over, as well as a booming area of research. Advances in voice synthesis and the emergence of chatbots powered by large language models (LLMs), notably ChatGPT, have pushed CUIs to the forefront of human-computer interaction (HCI) research and practice. Now that these technologies enable an elemental level of usability and user experience (UX), we must turn our attention to higher-order human factors: trust and reliance. In this workshop, we aim to bring together a multidisciplinary group of researchers and practitioners invested in the next phase of CUI design. Through keynotes, presentations, and breakout sessions, we will share our knowledge, identify cutting-edge resources, and fortify an international network of CUI scholars. In particular, we will engage with the complexity of trust and reliance as attitudes and behaviours that emerge when people interact with conversational agents.},
  keywords  = {Workshop},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Decide Yourself or Delegate - User Preferences Regarding the Autonomy of Personal Privacy Assistants in Private IoT-Equipped Environments
Karola Marky (Ruhr University Bochum), Alina Stöver (Technical University of Darmstadt), Sarah Prange (University of the Bundeswehr), Kira Bleck (Technical University of Darmstadt), Paul Gerber (Technical University of Darmstadt), Verena Zimmermann (ETH Zürich), Florian Müller (LMU Munich), Florian Alt (University of the Bundeswehr), Max Mühlhäuser (Technical University of Darmstadt)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Marky2024DecideYourself,
  title     = {Decide Yourself or Delegate - User Preferences Regarding the Autonomy of Personal Privacy Assistants in Private IoT-Equipped Environments},
  author    = {Karola Marky (Ruhr University Bochum), Alina Stöver (Technical University of Darmstadt), Sarah Prange (University of the Bundeswehr), Kira Bleck (Technical University of Darmstadt), Paul Gerber (Technical University of Darmstadt), Verena Zimmermann (ETH Zürich), Florian Müller (LMU Munich), Florian Alt (University of the Bundeswehr), Max Mühlhäuser (Technical University of Darmstadt)},
  url       = {https://informatik.rub.de/digisoul/personen/marky/, website},
  doi       = {10.1145/3613904.3642591},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Personalized privacy assistants (PPAs) communicate privacy-related decisions of their users to Internet of Things (IoT) devices. There are different ways to implement PPAs by varying the degree of autonomy or decision model. This paper investigates user perceptions of PPA autonomy models and privacy profiles – archetypes of individual privacy needs - as a basis for PPA decisions in private environments (e.g., a friend's home). We first explore how privacy profiles can be assigned to users and propose an assignment method. Next, we investigate user perceptions in 18 usage scenarios with varying contexts, data types and number of decisions in a study with 1126 participants. We found considerable differences between the profiles in settings with few decisions. If the number of decisions gets high (> 1/h), participants exclusively preferred fully autonomous PPAs. Finally, we discuss implications and recommendations for designing scalable PPAs that serve as privacy interfaces for future IoT devices.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Design Space of Visual Feedforward And Corrective Feedback in XR-Based Motion Guidance Systems
Xingyao Yu (VISUS, University of Stuttgart), Benjamin Lee (VISUS, University of Stuttgart), Michael Sedlmair (VISUS, University of Stuttgart)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Yu2024DesignSpace,
  title     = {Design Space of Visual Feedforward And Corrective Feedback in XR-Based Motion Guidance Systems},
  author    = {Xingyao Yu (VISUS, University of Stuttgart), Benjamin Lee (VISUS, University of Stuttgart), Michael Sedlmair (VISUS, University of Stuttgart)},
  url       = {https://www.visus.uni-stuttgart.de/en/, website},
  doi       = {10.1145/3613904.3642143},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Extended reality (XR) technologies are highly suited in assisting individuals in learning motor skills and movements—referred to as motion guidance. In motion guidance, the ``feedforward'' provides instructional cues of the motions that are to be performed, whereas the ``feedback'' provides cues which help correct mistakes and minimize errors. Designing synergistic feedforward and feedback is vital to providing an effective learning experience, but this interplay between the two has not yet been adequately explored. Based on a survey of the literature, we propose design spaces for both motion feedforward and corrective feedback in XR, and describe the interaction effects between them. We identify common design approaches of XR-based motion guidance found in our literature corpus, and discuss them through the lens of our design dimensions. We then discuss additional contextual factors and considerations that influence this design, together with future research opportunities of motion guidance in XR.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Designing for Human Operations on the Moon: Challenges and Opportunities of Navigational HUD Interfaces
Leonie Bensch (German Aerospace Center), Tommy Nilsson (European Space Agency), Jan Wulkop (German Aerospace Center), Paul de Medeiros (European Space Agency), Nicolas Daniel Herzberger (RWTH Aachen), Michael Preutenborbeck (RWTH Aachen), Andreas Gerndt (German Aerospace Center), Frank Flemisch (RWTH Aachen), Florian Dufresne (Arts et Métiers Institute of Technology), Georgia Albuquerque (German Aerospace Center), Aidan Cowley (European Space Agency)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Bensch2024DesigningHuman,
  title     = {Designing for Human Operations on the Moon: Challenges and Opportunities of Navigational HUD Interfaces},
  author    = {Leonie Bensch (German Aerospace Center), Tommy Nilsson (European Space Agency), Jan Wulkop (German Aerospace Center), Paul de Medeiros (European Space Agency), Nicolas Daniel Herzberger (RWTH Aachen), Michael Preutenborbeck (RWTH Aachen), Andreas Gerndt (German Aerospace Center), Frank Flemisch (RWTH Aachen), Florian Dufresne (Arts et Métiers Institute of Technology), Georgia Albuquerque (German Aerospace Center), Aidan Cowley (European Space Agency)},
  url       = {https://www.dlr.de/sc/en/desktopdefault.aspx/tabid-1200/1659_read-3101/, website
https://drive.google.com/file/d/1SqQRF5YqhHsy0J9vFQsiaN3bHOP_p9i_/view?usp=sharing, teaser video
https://drive.google.com/file/d/1Q1CMuReXr9lPCTuSyxbNvg4GLVeAeJTC/view?usp=sharing, full video},
  doi       = {10.1145/3613904.3642859},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Future crewed missions to the Moon will face significant environmental and operational challenges, posing risks to the safety and performance of astronauts navigating its inhospitable surface. Whilst head-up displays (HUDs) have proven effective in providing intuitive navigational support on Earth, the design of novel human-spaceflight solutions typically relies on costly and time-consuming analogue deployments, leaving the potential use of lunar HUD’s largely under-explored. This paper explores an alternative approach by simulating navigational HUD concepts in a high-fidelity Virtual Reality (VR) representation of the lunar environment. In evaluating these concepts with astronauts and other aerospace experts (n=25), our mixed methods study demonstrates the efficacy of simulated analogues in facilitating rapid design assessments of early-stage HUD solutions. We illustrate this by elaborating key design challenges and guidelines for future lunar HUDs. In reflecting on the limitations of our approach, we propose directions for future design exploration of human-machine interfaces for the Moon.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Development and Validation of the Collision Anxiety Questionnaire for VR Applications
Patrizia Ring (Universität Duisburg-Essen), Julius Tietenberg (Universität Duisburg-Essen), Katharina Emmerich (Universität Duisburg-Essen), Maic Masuch (Universität Duisburg-Essen)
In: 2024.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{Ring2024DevelopmentValidation,
  title     = {Development and Validation of the Collision Anxiety Questionnaire for VR Applications},
  author    = {Patrizia Ring (Universität Duisburg-Essen), Julius Tietenberg (Universität Duisburg-Essen), Katharina Emmerich (Universität Duisburg-Essen), Maic Masuch (Universität Duisburg-Essen)},
  url       = {https://www.ecg.uni-due.de/home/home.html, website
https://www.instagram.com/ecg.ude/, social media},
  doi       = {10.1145/3613904.3642408},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {The high degree of sensory immersion is a distinctive feature of head-mounted virtual reality (VR) systems. While the visual detachment from the real world enables unique immersive experiences, users risk collisions due to their inability to perceive physical obstacles in their environment. Even the mere anticipation of a collision can adversely affect the overall experience and erode user confidence in the VR system. However, there are currently no valid tools for assessing collision anxiety. We present the iterative development and validation of the Collision Anxiety Questionnaire (CAQ), involving an exploratory and a confirmatory factor analysis with a total of 159 participants. The results provide evidence for both discriminant and convergent validity and a good model fit for the final CAQ with three subscales: general collision anxiety, orientation, and interpersonal collision anxiety. By utilizing the CAQ, researchers can examine potential confounding effects of collision anxiety and evaluate methods for its mitigation.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Do You Need to Touch? Exploring Correlations between Personal Attributes and Preferences for Tangible Privacy Mechanisms
Sarah Delgado Rodriguez (University of the Bundeswehr Munich), Priyasha Chatterjee (Ruhr-University Bochum), Anh Dao Phuong (LMU Munich), Florian Alt (University of the Bundeswehr Munich), Karola Marky (Ruhr-University Bochum)
In: 2024.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{Rodriguez2024DoNeed,
  title     = {Do You Need to Touch? Exploring Correlations between Personal Attributes and Preferences for Tangible Privacy Mechanisms},
  author    = {Sarah Delgado Rodriguez (University of the Bundeswehr Munich), Priyasha Chatterjee (Ruhr-University Bochum), Anh Dao Phuong (LMU Munich), Florian Alt (University of the Bundeswehr Munich), Karola Marky (Ruhr-University Bochum)},
  url       = {https://www.unibw.de/usable-security-and-privacy, website
https://youtu.be/EpthiyvegeI, teaser video
https://youtu.be/NkgLIxpVils, full video
https://twitter.com/USECUnibwM, social media},
  doi       = {10.1145/3613904.3642863},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {This paper explores how personal attributes, such as age, gender, technological expertise, or "need for touch", correlate with people's preferences for properties of tangible privacy protection mechanisms, for example, physically covering a camera. For this, we conducted an online survey (N = 444) where we captured participants' preferences of eight established tangible privacy mechanisms well-known in daily life, their perceptions of effective privacy protection, and personal attributes. We found that the attributes that correlated most strongly with participants' perceptions of the established tangible privacy mechanisms were their "need for touch" and previous experiences with the mechanisms. We use our findings to identify desirable characteristics of tangible mechanisms to better inform future tangible, digital, and mixed privacy protections. We also show which individuals benefit most from tangibles, ultimately motivating a more individual and effective approach to privacy protection in the future.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
DungeonMaker: Embedding Tangible Creation and Destruction in Hybrid Board Games through Personal Fabrication Technology
Evgeny Stemasov (Institute of Media Informatics, Ulm University), Tobias Wagner (Institute of Media Informatics, Ulm University), Ali Askari (Institute of Media Informatics, Ulm University), Jessica Janek (Institute of Media Informatics, Ulm University), Omid Rajabi (Institute of Media Informatics, Ulm University), Anja Schikorr (Institute of Media Informatics, Ulm University), Julian Frommel (Utrecht University), Jan Gugenheimer (TU-Darmstadt, Institut Polytechnique de Paris), Enrico Rukzio (Institute of Media Informatics, Ulm University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Stemasov2024Dungeonmaker,
  title     = {DungeonMaker: Embedding Tangible Creation and Destruction in Hybrid Board Games through Personal Fabrication Technology},
  author    = {Evgeny Stemasov (Institute of Media Informatics, Ulm University), Tobias Wagner (Institute of Media Informatics, Ulm University), Ali Askari (Institute of Media Informatics, Ulm University), Jessica Janek (Institute of Media Informatics, Ulm University), Omid Rajabi (Institute of Media Informatics, Ulm University), Anja Schikorr (Institute of Media Informatics, Ulm University), Julian Frommel (Utrecht University), Jan Gugenheimer (TU-Darmstadt and Institut Polytechnique de Paris), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
  url       = {https://www.uni-ulm.de/in/mi/hci/, website
https://youtu.be/kKJD8Nv33qI, teaser video
https://youtu.be/NbIc-sOfT5Y, full video
https://twitter.com/mi_uulm, social media},
  doi       = {10.1145/3613904.3642243},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Hybrid board games (HBGs) augment their analog origins digitally (e.g., through apps) and are an increasingly popular pastime activity. Continuous world and character development and customization, known to facilitate engagement in video games, remain rare in HBGs. If present, they happen digitally or imaginarily, often leaving physical aspects generic. We developed DungeonMaker, a fabrication-augmented HBG bridging physical and digital game elements: 1) the setup narrates a story and projects a digital game board onto a laser cutter; 2) DungeonMaker assesses player-crafted artifacts; 3) DungeonMaker's modified laser head senses and moves player- and non-player figures, and 4) can physically damage figures. An evaluation (n=4x3) indicated that DungeonMaker provides an engaging experience, may support players' connection to their figures, and potentially spark novices' interest in fabrication. DungeonMaker provides a rich constellation to play HBGs by blending aspects of craft and automation to couple the physical and digital elements of an HBG tightly.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Effects of a Gaze-Based 2D Platform Game on User Enjoyment, Perceived Competence, and Digital Eye Strain
Mark Colley (Institute of Media Informatics, Ulm University, Cornell Tech, New York City, New York, United States), Beate Wanner (Institute of Media Informatics, Ulm University), Max Rädler (Institute of Media Informatics, Ulm University), Marcel Rötzer (Institute of Media Informatics, Ulm University), Julian Frommel (Utrecht University), Teresa Hirzle (Department of Computer Science, University of Copenhagen), Pascal Jansen (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Colley2024EffectsGazebased,
  title     = {Effects of a Gaze-Based 2D Platform Game on User Enjoyment, Perceived Competence, and Digital Eye Strain},
  author    = {Mark Colley (Institute of Media Informatics, Ulm University and Cornell Tech, New York City, New York, United States), Beate Wanner (Institute of Media Informatics, Ulm University), Max Rädler (Institute of Media Informatics, Ulm University), Marcel Rötzer (Institute of Media Informatics, Ulm University), Julian Frommel (Utrecht University), Teresa Hirzle (Department of Computer Science, University of Copenhagen), Pascal Jansen (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
  url       = {https://www.uni-ulm.de/en/in/mi/hci/, website
https://twitter.com/mi_uulm?lang=de, social media},
  doi       = {10.1145/3613904.3641909},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Gaze interaction is a promising interaction method to increase variety, challenge, and fun in games. We present “Shed Some Fear”, a 2D platform game including numerous eye-gaze-based interactions. “Shed Some Fear” includes control with eye-gaze and traditional keyboard input. The eye-gaze interactions are partially based on eye exercises reducing digital eye strain but also on employing peripheral vision. By employing eye-gaze as a necessary input mechanism, we explore the effects on and tradeoffs between user enjoyment and digital eye strain in a five-day longitudinal between-subject study (N=17) compared to interaction with a traditional mouse. We found that perceived competence was significantly higher with eye gaze interaction and significantly higher internal eye strain. With this work, we contribute to the not straightforward inclusion of eye tracking as a useful and fun input method for games.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Enhancing Online Meeting Experience through Shared Gaze-Attention
Chandan Kumar (Fraunhofer IAO), Bhupender Kumar Saini (Fraunhofer IAO & University of Stuttgart), Steffen Staab (University of Stuttgart & University of Southampton)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Kumar2024EnhancingOnline,
  title     = {Enhancing Online Meeting Experience through Shared Gaze-Attention},
  author    = {Chandan Kumar (Fraunhofer IAO), Bhupender Kumar Saini (Fraunhofer IAO & University of Stuttgart), Steffen Staab (University of Stuttgart & University of Southampton)},
  url       = {https://www.ki.uni-stuttgart.de/departments/ac/, website
https://twitter.com/AnalyticComp, social media},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {Eye contact represents a fundamental element of human social interactions, providing essential non-verbal signals. Traditionally, it has played a crucial role in fostering social bonds during in-person gatherings. However, in the realm of virtual and online meetings, the capacity for meaningful eye contact is often compromised by the limitations of the platforms we use. In response to this challenge, we present an application framework that leverages webcams to detect and share eye gaze attention among participants. Through the framework, we organized 13 group meetings involving a total of 43 participants. The results highlight that the inclusion of gaze attention can enrich interactive experiences and elevate engagement levels in online meetings. Additionally, our evaluation of two levels of gaze sharing schemes indicates that users predominantly favor viewing gaze attention directed toward themselves, as opposed to visualizing detailed attention, which tends to lead to distraction and information overload.},
  keywords  = {Late Breaking Work},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Evaluating Interactive AI: Understanding and Controlling Placebo Effects in Human-AI Interaction
Steeven Villa (LMU Munich), Robin Welsch (Aalto University), Alena Denisova (University of York), Thomas Kosch (HU Berlin)
In: 2024.
Abstract | Tags: Workshop | Links:
@inproceedings{Villa2024EvaluatingInteractive,
  title     = {Evaluating Interactive AI: Understanding and Controlling Placebo Effects in Human-AI Interaction},
  author    = {Steeven Villa (LMU Munich), Robin Welsch (Aalto University), Alena Denisova (University of York), Thomas Kosch (HU Berlin)},
  url       = {https://www.hcistudio.org, website},
  doi       = {10.1145/3613905.3636304},
  year      = {2024},
  date      = {2024-05-11},
  urldate   = {2024-05-11},
  abstract  = {In the medical field, patients often experience tangible benefits from treatments they expect will improve their condition, even if the treatment has no mechanism of effect. This phenomenon often obscuring scientific evaluation of human treatment is termed the "placebo effect." Latest research in human-computer interaction has shown that using cutting-edge technologies similarly raises expectations of improvement, culminating in placebo effects that undermine evaluation efforts for user studies. This workshop delves into the role of placebo effects in human-computer interaction for cutting-edge technologies such as artificial intelligence, its influence as a confounding factor in user studies, and identifies methods that researchers can adopt to reduce its impact on study findings. By the end of this workshop, attendees will be equipped to incorporate placebo control measures in their experimental designs.},
  keywords  = {Workshop},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Experiencing Dynamic Weight Changes in Virtual Reality Through Pseudo-Haptics and Vibrotactile Feedback
Carolin Stellmacher (University of Bremen), Feri Irsanto Pujianto (Technical University of Berlin), Tanja Kojić (Technical University of Berlin), Jan-Niklas Voigt-Antons (Hamm-Lippstadt University of Applied Sciences), Johannes Schöning (University of St. Gallen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Stellmacher2024ExperiencingDynamic,
title = {Experiencing Dynamic Weight Changes in Virtual Reality Through Pseudo-Haptics and Vibrotactile Feedback},
author = {Carolin Stellmacher (University of Bremen), Feri Irsanto Pujianto (Technical University of Berlin), Tanja Kojić (Technical University of Berlin), Jan-Niklas Voigt-Antons (Hamm-Lippstadt University of Applied Sciences), Johannes Schöning (University of St. Gallen)},
url = {https://www.uni-bremen.de/dmlab, website
https://youtu.be/ygxuu-a0oRc, teaser video
https://www.youtube.com/watch?v=57Wwr6cmvgQ, full video},
doi = {10.1145/3613904.3642552},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Virtual reality (VR) objects react dynamically to users' touch interactions in real-time. However, experiencing changes in weight through the haptic sense remains challenging with consumer VR controllers due to their limited vibrotactile feedback. While prior works successfully applied pseudo-haptics to perceive absolute weight by manipulating the control-display (C/D) ratio, we continuously adjusted the C/D ratio to mimic weight changes. Vibrotactile feedback additionally emphasises the modulation in the virtual object's physicality. In a study (N=18), we compared our multimodal technique with pseudo-haptics alone and a baseline condition to assess participants' experiences of weight changes. Our findings demonstrate that participants perceived varying degrees of weight change when the C/D ratio was adjusted, validating its effectiveness for simulating dynamic weight in VR. However, the additional vibrotactile feedback did not improve weight change perception. This work extends the understanding of designing haptic experiences for lightweight VR systems by leveraging perceptual mechanisms.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Explaining It Your Way - Findings from a Co-Creative Design Workshop on Designing XAI Applications with AI End-Users from the Public Sector
Katharina Weitz (University of Augsburg), Ruben Schlagowski (University of Augsburg), Elisabeth André (University of Augsburg), Maris Männiste (University of Tartu), Ceenu George (TU Berlin)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Weitz2024ExplainingIt,
title = {Explaining It Your Way - Findings from a Co-Creative Design Workshop on Designing XAI Applications with AI End-Users from the Public Sector},
author = {Katharina Weitz (University of Augsburg), Ruben Schlagowski (University of Augsburg), Elisabeth André (University of Augsburg), Maris Männiste (University of Tartu), Ceenu George (TU Berlin)},
url = {hcai.eu, website},
doi = {10.1145/3613904.3642563},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Human-Centered AI prioritizes end-users' needs like transparency and usability. This is vital for applications that affect people's everyday lives, such as social assessment tasks in the public sector. This paper discusses our pioneering effort to involve public sector AI users in XAI application design through a co-creative workshop with unemployment consultants from Estonia. The workshop's objectives were identifying user needs and creating novel XAI interfaces for the used AI system. As a result of our user-centered design approach, consultants were able to develop AI interface prototypes that would support them in creating success stories for their clients by getting detailed feedback and suggestions. We present a discussion on the value of co-creative design methods with end-users working in the public sector to improve AI application design and provide a summary of recommendations for practitioners and researchers working on AI systems in the public sector.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Exploring Mobile Devices as Haptic Interfaces for Mixed Reality
Carolin Stellmacher (University of Bremen), Florian Mathis (University of St.Gallen), Yannick Weiss (LMU Munich), Meagan B. Loerakker (Chalmers University of Technology), Nadine Wagener (University of Bremen), Johannes Schöning (University of St. Gallen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Stellmacher2024ExploringMobile,
title = {Exploring Mobile Devices as Haptic Interfaces for Mixed Reality},
author = {Carolin Stellmacher (University of Bremen), Florian Mathis (University of St.Gallen), Yannick Weiss (LMU Munich), Meagan B. Loerakker (Chalmers University of Technology), Nadine Wagener (University of Bremen), Johannes Schöning (University of St. Gallen)},
url = {https://www.uni-bremen.de/dmlab, website
https://www.youtube.com/watch?v=SBaaCeTH3BM, full video},
doi = {10.1145/3613904.3642176},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Dedicated handheld controllers facilitate haptic experiences of virtual objects in mixed reality (MR). However, as mobile MR becomes more prevalent, we observe the emergence of controller-free MR interactions. To retain immersive haptic experiences, we explore the use of mobile devices as a substitute for specialised MR controller. In an exploratory gesture elicitation study (n = 18), we examined users' (1) intuitive hand gestures performed with prospective mobile devices and (2) preferences for real-time haptic feedback when exploring haptic object properties. Our results reveal three haptic exploration modes for the mobile device, as an object, hand substitute, or as an additional tool, and emphasise the benefits of incorporating the device's unique physical features into the object interaction. This work expands the design possibilities using mobile devices for tangible object interaction, guiding the future design of mobile devices for haptic MR experiences.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Exploring Spatial Organization Strategies for Virtual Content in Mixed Reality Environments
Weizhou Luo (Interactive Media Lab Dresden, Technische Universität Dresden)
In: 2024.
Abstract | Tags: Doctoral Consortium | Links:
@inproceedings{Luo2024ExploringSpatial,
title = {Exploring Spatial Organization Strategies for Virtual Content in Mixed Reality Environments},
author = {Weizhou Luo (Interactive Media Lab Dresden, Technische Universität Dresden)},
url = {https://imld.de/en/, website
https://twitter.com/imldresden, social media},
doi = {10.1145/3613905.3638181},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Our future will likely be reshaped by Mixed Reality (MR) offering boundless display space while preserving the context of real-world surroundings. However, to fully leverage the spatial capabilities of MR technology, a better understanding of how and where to place virtual content like documents is required, particularly considering the situated context. I aim to explore spatial organization strategies for virtual content in MR environments. For that, we conducted empirical studies investigating users' strategies for document layout and placement and examined two real-world factors: physical environments and people present. With this knowledge, we proposed a mixed-reality approach for the in-situ exploration and analysis of human movement data utilizing physical objects in the original space as referents. My next steps include exploring arrangement strategies, designing techniques empowering spatial organization, and extending understandings for multi-user scenarios. My dissertation will enrich the immersive interface repertoire and contribute to the design of future MR systems.},
keywords = {Doctoral Consortium},
pubstate = {published},
tppubtype = {inproceedings}
}
Exploring the Association Between Engagement With Location-Based Game Features and Getting Inspired About Environmental Issues and Nature
Bastian Kordyaka (University of Bremen), Samuli Laato (Tampere University), Sebastian Weber (University of Bremen), Juho Hamari (Tampere University), Bjoern Niehaves (University of Bremen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Kordyaka2024ExploringAssociation,
title = {Exploring the Association Between Engagement With Location-Based Game Features and Getting Inspired About Environmental Issues and Nature},
author = {Bastian Kordyaka (University of Bremen), Samuli Laato (Tampere University), Sebastian Weber (University of Bremen), Juho Hamari (Tampere University), Bjoern Niehaves (University of Bremen)},
url = {https://www.uni-bremen.de/digital-public, website},
doi = {10.1145/3613904.3642786},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Today, millions worldwide play popular location-based games (LBGs) such as Pokémon GO. LBGs are designed to be played outdoors, and past research has shown that they can incentivize players to travel to nature. To further explore this nature-connection, we investigated via a mixed-methods approach the connections between engagement with LBGs, inspiration and environmental awareness as follows. First, we identified relevant gamification features in Study 1. Based on the insights, we built a survey that we sent to Pokémon GO players (N=311) in Study 2. The results showed that (a) social networking features, reminders, and virtual objects were the most relevant gamification features to explain inspired by playing Pokémon GO and that (b) inspired to outdoor engagement partially mediated the relationship between inspired by playing Pokémon GO and environmental awareness. These results warrant further investigations into whether LBGs could motivate pro-environment attitudes and inspire people to care for nature.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Field Notes on Deploying Research Robots in Public Spaces
Fanjun Bu (Cornell Tech), Alexandra W.D. Bremers (Cornell Tech), Mark Colley (Institute of Media Informatics, Ulm University and Cornell Tech), Wendy Ju (Cornell Tech)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Bu2024FieldNotes,
title = {Field Notes on Deploying Research Robots in Public Spaces},
author = {Fanjun Bu (Cornell Tech), Alexandra W.D. Bremers (Cornell Tech), Mark Colley (Institute of Media Informatics, Ulm University and Cornell Tech), Wendy Ju (Cornell Tech)},
doi = {10.1145/3613905.3651044},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Human-robot interaction requires to be studied in the wild. In the summers of 2022 and 2023, we deployed two trash barrel service robots through the wizard-of-oz protocol in public spaces to study human-robot interactions in urban settings. We deployed the robots at two different public plazas in downtown Manhattan and Brooklyn for a collective of 20 hours of field time. To date, relatively few long-term human-robot interaction studies have been conducted in shared public spaces. To support researchers aiming to fill this gap, we would like to share some of our insights and learned lessons that would benefit both researchers and practitioners on how to deploy robots in public spaces. We share best practices and lessons learned with the HRI research community to encourage more in-the-wild research of robots in public spaces and call for the community to share their lessons learned to a GitHub repository.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Fighting Malicious Designs: Towards Visual Countermeasures Against Dark Patterns
René Schäfer (RWTH Aachen University), Paul Preuschoff (RWTH Aachen University), René Röpke (RWTH Aachen University), Sarah Sahabi (RWTH Aachen University), Jan Borchers (RWTH Aachen University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Schäfer2024FightingMalicious,
title = {Fighting Malicious Designs: Towards Visual Countermeasures Against Dark Patterns},
author = {René Schäfer (RWTH Aachen University), Paul Preuschoff (RWTH Aachen University), René Röpke (RWTH Aachen University), Sarah Sahabi (RWTH Aachen University), Jan Borchers (RWTH Aachen University)},
url = {https://hci.rwth-aachen.de, website},
doi = {10.1145/3613904.3642661},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Dark patterns are malicious UI design strategies that nudge users towards decisions going against their best interests. To create technical countermeasures against them, dark patterns must be automatically detectable. While researchers have devised algorithms to detect some patterns automatically, there has only been little work to use obtained results to technically counter the effects of dark patterns when users face them on their devices. To address this, we tested three visual countermeasures against 13 common dark patterns in an interactive lab study. The countermeasures we tested either (a) highlighted and explained the manipulation, (b) hid it from the user, or (c) let the user switch between the original view and the hidden version. From our data, we were able to extract multiple clusters of dark patterns where participants preferred specific countermeasures for similar reasons. To support creating effective countermeasures, we discuss our findings with a recent ontology of dark patterns.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Flextiles: Designing Customisable Shape-Change in Textiles with SMA-Actuated Smocking Patterns
Alice Haynes (Saarland University), Jürgen Steimle (Saarland University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Haynes2024Flextiles,
title = {Flextiles: Designing Customisable Shape-Change in Textiles with SMA-Actuated Smocking Patterns},
author = {Alice Haynes (Saarland University), Jürgen Steimle (Saarland University)},
url = {https://hci.cs.uni-saarland.de/, website
https://hci.cs.uni-saarland.de/, social media},
doi = {10.1145/3613904.3642848},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Shape Memory Alloys (SMAs) afford the seamless integration of shape-changing behaviour into textiles, enabling designers to augment apparel with dynamic shaping and styling. However, existing works fall short of providing versatile methods adaptable to varying scales, materials, and applications, curtailing designers’ capacity to prototype customised solutions. To address this, we introduce Flextiles, parameterised SMA design schema that leverage the traditional craft of smocking to integrate planar shape-change seamlessly into diverse textile projects. The conception of Flextiles stems from material experimentation and consultative dialogues with designers, whose insights inspired strategies for customising scale, elasticity, geometry, and actuation of Flextiles. To support the practical implementation of Flextiles, we provide a design tool and experimentally characterise their material properties. Lastly, through a design case study with practitioners, we explore the multifaceted applications and perspectives surrounding Flextiles, and subsequently realise four scenarios that illustrate the creative potential of these modular, customisable patterns.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
From Adolescents’ Eyes: Assessing an Indicator-Based Intervention to Combat Misinformation on TikTok
Katrin Hartwig (TU Darmstadt), Tom Biselli (TU Darmstadt), Franziska Schneider (TU Darmstadt), Christian Reuter (TU Darmstadt)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Hartwig2024FromAdolescents,
title = {From Adolescents’ Eyes: Assessing an Indicator-Based Intervention to Combat Misinformation on TikTok},
author = {Katrin Hartwig (TU Darmstadt), Tom Biselli (TU Darmstadt), Franziska Schneider (TU Darmstadt), Christian Reuter (TU Darmstadt)},
url = {www.peasec.de, website},
doi = {10.1145/3613904.3642264},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Misinformation poses a recurrent challenge for video-sharing platforms (VSPs) like TikTok. Obtaining user perspectives on digital interventions addressing the need for transparency (e.g., through indicators) is essential. This article offers a thorough examination of the comprehensibility, usefulness, and limitations of an indicator-based intervention from an adolescents’ perspective. This study (N=39; aged 13-16 years) comprised two qualitative steps: (1) focus group discussions and (2) think-aloud sessions, where participants engaged with a smartphone-app for TikTok. The results offer new insights into how video-based indicators can assist adolescents’ assessments. The intervention received positive feedback, especially for its transparency, and could be applicable to new content. This paper sheds light on how adolescents are expected to be experts while also being prone to video-based misinformation, with limited understanding of an intervention’s limitations. By adopting teenage perspectives, we contribute to HCI research and provide new insights into the chances and limitations of interventions for VSPs.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
From Real to Virtual: Exploring Replica-Enhanced Environment Transitions along the Reality-Virtuality Continuum
Fabian Pointecker (University of Applied Sciences Upper Austria, Hagenberg, Austria), Judith Friedl-Knirsch (University of Applied Sciences Upper Austria, Hagenberg, Austria), Hans-Christian Jetter (Institute for Multimedia and Interactive Systems, University of Lübeck, Lübeck, Germany), Christoph Anthes (University of Applied Sciences Upper Austria, Hagenberg, Austria)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Pointecker2024FromReal,
title = {From Real to Virtual: Exploring Replica-Enhanced Environment Transitions along the Reality-Virtuality Continuum},
author = {Fabian Pointecker (University of Applied Sciences Upper Austria, Hagenberg, Austria), Judith Friedl-Knirsch (University of Applied Sciences Upper Austria, Hagenberg, Austria), Hans-Christian Jetter (Institute for Multimedia and Interactive Systems, University of Lübeck, Lübeck, Germany), Christoph Anthes (University of Applied Sciences Upper Austria, Hagenberg, Austria)},
url = {imis.uni-luebeck.de, website
https://www.youtube.com/watch?v=ID3ITQOgzUI, full video},
doi = {10.1145/3613904.3642844},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Recent Head-Mounted Displays enable users to perceive the real environment using a video-based see-through mode and the fully virtual environment within a single display. Leveraging these advancements, we present a generic concept to seamlessly transition between the real and virtual environment, with the goal of supporting users in engaging with and disengaging from any real environment into Virtual Reality. This transition process uses a digital replica of the real environment and incorporates various stages of Milgram’s Reality-Virtuality Continuum, along with visual transitions that facilitate gradual navigation between them. We implemented the overall transition concept and four object-based transition techniques. The overall transition concept and four techniques were evaluated in a qualitative user study, focusing on user experience, the use of the replica and visual coherence. The results of the user study show, that most participants stated that the replica facilitates the cognitive processing of the transition and supports spatial orientation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Gamification Concepts for a VR-based Visuospatial Training for Intraoperative Liver Ultrasound
Mareen Allgaier, Florentine Huettl, Laura Isabel Hanke, Tobias Huber, Bernhard Preim, Sylvia Saalfeld, Christian Hansen
In: Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Honolulu, HI, USA, 2024, ISBN: 9798400703317.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{10.1145/3613905.3650736,
title = {Gamification Concepts for a VR-based Visuospatial Training for Intraoperative Liver Ultrasound},
author = {Mareen Allgaier and Florentine Huettl and Laura Isabel Hanke and Tobias Huber and Bernhard Preim and Sylvia Saalfeld and Christian Hansen},
url = {https://doi.org/10.1145/3613905.3650736},
doi = {10.1145/3613905.3650736},
isbn = {9798400703317},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {Honolulu, HI, USA},
series = {CHI EA '24},
abstract = {Gamification is widely used due to its positive influence on learning by adding emotions and steering behavior. In medical VR training applications, the use of gamification is rare, and when it is implemented, it often lacks thoughtful design decisions and empirical evaluation. Using a VR-based training for intraoperative ultrasound for liver surgery, we analyzed game elements regarding their suitability and examined two in more detail: difficulty levels and a kit, where the user has to assemble a virtual liver using US. In a broad audience study, levels achieved significantly better results regarding enjoyment. Qualitative feedback from medical students directly comparing the elements revealed that they prefer the kit as well as levels for training. Our studies indicate that levels and the more interactive kit improve the learning experience, which could also be taken as a basis for similar VR-based medical training applications.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Giving Robots a Voice: Human-in-the-Loop Voice Creation and open-ended Labeling
Pol van Rijn (Max Planck Institute for Empirical Aesthetics, Frankfurt), Silvan Mertes (University of Augsburg), Kathrin Janowski (University of Augsburg), Katharina Weitz (University of Augsburg), Nori Jacoby (Max Planck Institute for Empirical Aesthetics, Frankfurt), Elisabeth André (University of Augsburg)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Rijn2024GivingRobots,
title = {Giving Robots a Voice: Human-in-the-Loop Voice Creation and open-ended Labeling},
author = {Pol van Rijn (Max Planck Institute for Empirical Aesthetics, Frankfurt), Silvan Mertes (University of Augsburg), Kathrin Janowski (University of Augsburg), Katharina Weitz (University of Augsburg), Nori Jacoby (Max Planck Institute for Empirical Aesthetics, Frankfurt), Elisabeth André (University of Augsburg)},
url = {hcai.eu, website
https://www.instagram.com/hcailab/, instagram
https://www.facebook.com/hcailab, facebook
https://www.youtube.com/user/HCMLab, youtube
https://twitter.com/hcailab, twitter},
doi = {10.1145/3613904.3642038},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Speech is a natural interface for humans to interact with robots. Yet, aligning a robot’s voice to its appearance is challenging due to the rich vocabulary of both modalities. Previous research has explored a few labels to describe robots and tested them on a limited number of robots and existing voices. Here, we develop a robot-voice creation tool followed by large-scale behavioral human experiments (N=2,505). First, participants collectively tune robotic voices to match 175 robot images using an adaptive human-in-the-loop pipeline. Then, participants describe their impression of the robot or their matched voice using another human-in-the-loop paradigm for open-ended labeling. The elicited taxonomy is then used to rate robot attributes and to predict the best voice for an unseen robot. We offer a web interface to aid engineers in customizing robot voices, demonstrating the synergy between cognitive science and machine learning for engineering tools.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Grand Challenges in SportsHCI
Don Samitha Elvitigala (Monash University, Melbourne), Armağan Karahanoğlu (University of Twente, Enschede), Andrii Matviienko (KTH Royal Institute of Technology, Stockholm), Laia Turmo Vidal (Universidad Carlos III de Madrid), Dees Postma (University of Twente, Enschede), Michael D Jones (Brigham Young University, Provo), Maria F. Montoya (Monash University, Melbourne), Daniel Harrison (Northumbria University, Newcastle upon Tyne), Lars Elbæk (University of Southern Denmark, Odense), Florian Daiber (DFKI, Saarland Informatics Campus, Saarbrücken), Lisa Anneke Burr (University of Salzburg), Rakesh Patibanda (Monash University, Melbourne), Paolo Buono (University of Bari Aldo Moro, Bari), Perttu Hämäläinen (Aalto University, Espoo), Robby van Delden (University of Twente, Enschede), Professor Regina Bernhaupt (Eindhoven University of Technology), Dr. Xipei Ren (Beijing Institute of Technology), Vincent van Rheden (University of Salzburg), Fabio Zambetta (RMIT University, Melbourne), Elise van den Hoven (University of Technology Sydney, Eindhoven University of Technology, Eindhoven), Carine Lallemand (Eindhoven University of Technology, University of Luxembourg, Esch-sur-Alzette), Dennis Reidsma (University of Twente, Enschede), Florian ‘Floyd’ Mueller (Monash University, Melbourne)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Elvitigala2024GrandChallenges,
title = {Grand Challenges in SportsHCI},
author = {Don Samitha Elvitigala (Monash University, Melbourne), Armağan Karahanoğlu (University of Twente, Enschede), Andrii Matviienko (KTH Royal Institute of Technology, Stockholm), Laia Turmo Vidal (Universidad Carlos III de Madrid), Dees Postma (University of Twente, Enschede), Michael D Jones (Brigham Young University, Provo), Maria F. Montoya (Monash University, Melbourne), Daniel Harrison (Northumbria University, Newcastle upon Tyne), Lars Elbæk (University of Southern Denmark, Odense), Florian Daiber (DFKI, Saarland Informatics Campus, Saarbrücken), Lisa Anneke Burr (University of Salzburg), Rakesh Patibanda (Monash University, Melbourne), Paolo Buono (University of Bari Aldo Moro, Bari), Perttu Hämäläinen (Aalto University, Espoo), Robby van Delden (University of Twente, Enschede), Professor Regina Bernhaupt (Eindhoven University of Technology), Dr. Xipei Ren (Beijing Institute of Technology), Vincent van Rheden (University of Salzburg), Fabio Zambetta (RMIT University, Melbourne), Elise van den Hoven (University of Technology Sydney, Eindhoven University of Technology, Eindhoven), Carine Lallemand (Eindhoven University of Technology, University of Luxembourg, Esch-sur-Alzette), Dennis Reidsma (University of Twente, Enschede), Florian ‘Floyd’ Mueller (Monash University, Melbourne)},
url = {https://umtl.cs.uni-saarland.de, website},
doi = {10.1145/3613904.3642050},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The field of Sports Human-Computer Interaction (SportsHCI) investigates interaction design to support a physically active human being. Despite growing interest and dissemination of SportsHCI literature over the past years, many publications still focus on solving specific problems in a given sport. We believe in the benefit of generating fundamental knowledge for SportsHCI more broadly to advance the field as a whole. To achieve this, we aim to identify the grand challenges in SportsHCI, which can help researchers and practitioners in developing a future research agenda. Hence, this paper presents a set of grand challenges identified in a five-day workshop with 22 experts who have previously researched, designed, and deployed SportsHCI systems. Addressing these challenges will drive transformative advancements in SportsHCI, fostering better athlete performance, athlete-coach relationships, spectator engagement, but also immersive experiences for recreational sports or exercise motivation, and ultimately, improve human well-being.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
HILL: A Hallucination Identifier for Large Language Models
Florian Leiser (KIT), Merlin Knäble (KIT), Sven Eckhardt (University of Zürich), Valentin Leuthe (KIT), Alexander Maedche (KIT), Gerhard Schwabe (University of Zürich), Ali Sunyaev (KIT)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Leiser2024Hill,
title = {HILL: A Hallucination Identifier for Large Language Models},
author = {Florian Leiser (KIT), Merlin Knäble (KIT), Sven Eckhardt (University of Zürich), Valentin Leuthe (KIT), Alexander Maedche (KIT), Gerhard Schwabe (University of Zürich), Ali Sunyaev (KIT)},
url = {https://h-lab.iism.kit.edu/, website},
doi = {10.1145/3613904.3642428},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {LLMs and corresponding hallucinations which can lead to misinterpretations and errors. To tackle the problem of overreliance, we propose HILL, the "Hallucination Identifier for Large Language Models". First, we identified design features for HILL with a Wizard of Oz approach with nine participants. Subsequently, we implemented HILL based on the identified design features and evaluated HILL’s interface design by surveying 17 participants. Further, we investigated HILL’s functionality to identify hallucinations based on an existing question-answering dataset and five user interviews. We find that HILL can correctly identify and highlight hallucinations in LLM responses which enables users to handle LLM responses with more caution. With that, we propose an easy-to-implement adaptation to existing LLMs and demonstrate the relevance of usercentered designs of AI artifacts.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
How’s Your Sewing? Investigating Metrics to Automatically Assess Sewing Expertise
Marcel Lahaye (RWTH Aachen University), Ricarda Rahm (RWTH Aachen University), Andreas Dymek (RWTH Aachen University), Adrian Wagner (RWTH Aachen University), Judith Ernstberger (RWTH Aachen University), Jan Borchers (RWTH Aachen University)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Lahaye2024HowsYour,
title = {How’s Your Sewing? Investigating Metrics to Automatically Assess Sewing Expertise},
author = {Marcel Lahaye (RWTH Aachen University), Ricarda Rahm (RWTH Aachen University), Andreas Dymek (RWTH Aachen University), Adrian Wagner (RWTH Aachen University), Judith Ernstberger (RWTH Aachen University), Jan Borchers (RWTH Aachen University)},
url = {https://hci.rwth-aachen.de, website},
doi = {10.1145/3613905.3651067},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Makers must regularly assess their expertise when planning projects or selecting tutorials. However, personal bias makes this assessment prone to error, potentially leading to frustration, loss of materials, and discouragement. Additionally, hobbyists have limited feedback possibilities to refine their skills, unlike, for example, apprentice artisans who receive continuous instructor feedback. To address these issues, automated expertise assessment systems could help makers assess their skills and progress. However, such systems require assessment metrics, which have been studied little in the maker context so far. We derived such metrics for sewing from semi-structured interviews with ten sewing-related instructors about their evaluation process. Additionally, we showed them a sewn object and asked them to assess the creator's expertise. From our findings, we derive criteria to use in future automated sewing expertise assessment systems. For one criterion, seam allowance, we present a functional demonstrator that automatically assesses related measurements.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Human-Centered Explainable AI (HCXAI): Reloading Explainability in the Era of Large Language Models (LLMs)
Upol Ehsan (Georgia Institute of Technology), Elizabeth A Watkins (Intel Labs), Philipp Wintersberger (University of Applied Sciences Upper Austria), Carina Manger (Technische Hochschule Ingolstadt), Sunnie S.Y. Kim (Princeton University), Niels van Berkel (Aalborg University), Andreas Riener (Technische Hochschule Ingolstadt), Mark O Riedl (Georgia Institute of Technology)
In: 2024.
Abstract | Tags: Workshop | Links:
@inproceedings{Ehsan2024HumancenteredExplainable,
title = {Human-Centered Explainable AI (HCXAI): Reloading Explainability in the Era of Large Language Models (LLMs)},
author = {Upol Ehsan (Georgia Institute of Technology), Elizabeth A Watkins (Intel Labs), Philipp Wintersberger (University of Applied Sciences Upper Austria), Carina Manger (Technische Hochschule Ingolstadt), Sunnie S.Y. Kim (Princeton University), Niels van Berkel (Aalborg University), Andreas Riener (Technische Hochschule Ingolstadt), Mark O Riedl (Georgia Institute of Technology)},
url = {https://hcig.thi.de/, website},
doi = {10.1145/3613905.3636311},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Human-centered XAI (HCXAI) advocates that algorithmic transparency alone is not sufficient for making AI explainable. Explainability of AI is more than just “opening” the black box — who opens it matters just as much, if not more, as the ways of opening it. In the era of Large Language Models (LLMs), is “opening the black box” still a realistic goal for XAI? In this fourth CHI workshop on Human-centered XAI (HCXAI), we build on the maturation through the previous three installments to craft the coming-of-age story of HCXAI in the era of Large Language Models (LLMs). We aim towards actionable interventions that recognize both affordances and pitfalls of XAI. The goal of the fourth installment is to question how XAI assumptions fare in the era of LLMs and examine how human-centered perspectives can be operationalized at the conceptual, methodological, and technical levels. Encouraging holistic (historical, sociological, and technical) approaches, we emphasize “operationalizing.” We seek actionable analysis frameworks, concrete design guidelines, transferable evaluation methods, and principles for accountability.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
Improving Electromyographic Muscle Response Times through Visual and Tactile Prior Stimulation in Virtual Reality
Jessica Sehrt (Frankfurt University of Applied Sciences), Leonardo Leite Ferreira (Frankfurt University of Applied Sciences), Karsten Weyers (Frankfurt University of Applied Sciences), Amir Mahmood (Frankfurt University of Applied Sciences), Thomas Kosch (Humboldt University of Berlin), Valentin Schwind (Frankfurt University of Applied Sciences)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Sehrt2024ImprovingElectromyographic,
title = {Improving Electromyographic Muscle Response Times through Visual and Tactile Prior Stimulation in Virtual Reality},
author = {Jessica Sehrt (Frankfurt University of Applied Sciences), Leonardo Leite Ferreira (Frankfurt University of Applied Sciences), Karsten Weyers (Frankfurt University of Applied Sciences), Amir Mahmood (Frankfurt University of Applied Sciences), Thomas Kosch (Humboldt University of Berlin), Valentin Schwind (Frankfurt University of Applied Sciences)},
url = {https://www.frankfurt-university.de/mixed-reality-lab/, website},
doi = {10.1145/3613904.36420},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Electromyography (EMG) enables hands-free interactions by detecting muscle activity at different human body locations. Previous studies have demonstrated that input performance based on isometric contractions is muscle-dependent and can benefit from synchronous biofeedback. However, it remains unknown whether stimulation before interaction can help to localize and tense a muscle faster. In a response-based VR experiment (N=21), we investigated whether prior stimulation using visual or tactile cues at four different target muscles (biceps, triceps, upper leg, calf) can help reduce the time to perform isometric muscle contractions. The results show that prior stimulation decreases EMG reaction times with visual, vibrotactile, and electrotactile cues. Our experiment also revealed important findings regarding learning and fatigue at the different body locations. We provide qualitative insights into the participants' perceptions and discuss potential reasons for the improved interaction. We contribute with implications and use cases for prior stimulated muscle activation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Investigating Phubbing in Everyday Life: Challenges & Lessons for Future Research
Thomas Reiter (LMU Munich), Sophia Sakel (LMU Munich), Julian Scharbert (University of Münster), Julian ter Horst (Osnabrück University), Mitja Back (University of Münster), Maarten van Zalk (Osnabrück University), Markus Buehner (LMU Munich), Ramona Schoedel (LMU Munich, Charlotte Fresenius University)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Reiter2024InvestigatingPhubbing,
title = {Investigating Phubbing in Everyday Life: Challenges & Lessons for Future Research},
author = {Thomas Reiter (LMU Munich), Sophia Sakel (LMU Munich), Julian Scharbert (University of Münster), Julian ter Horst (Osnabrück University), Mitja Back (University of Münster), Maarten van Zalk (Osnabrück University), Markus Buehner (LMU Munich), Ramona Schoedel (LMU Munich, Charlotte Fresenius University)},
url = {https://www.medien.ifi.lmu.de/, website},
doi = {10.1145/3613905.3651009},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The ubiquitous presence of smartphones has made them an integral part of our social lives. A well-known example of this phenomenon is phubbing, where smartphone use distracts people from their daily interpersonal interactions. While previous research has mostly relied on often biased global self-reports, our work introduces a novel approach to assessing phubbing in real life. To this end, we conducted an empirical study that integrated experience sampling and mobile sensing methods to obtain a more objective measure and the design of phubbing-aware technologies based on it. By highlighting the challenges associated with existing methods, we aim to stimulate discussion in the field of HCI and encourage the development of socially friendly technologies that benefit real-life interpersonal interactions.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Investigating the Effects of External Communication and Platoon Behavior on Manual Drivers at Highway Access
Mark Colley (Institute of Media Informatics, Ulm University), Omid Rajabi (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Colley2024InvestigatingEffects,
title = {Investigating the Effects of External Communication and Platoon Behavior on Manual Drivers at Highway Access},
author = {Mark Colley (Institute of Media Informatics, Ulm University), Omid Rajabi (Institute of Media Informatics, Ulm University), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
url = {https://www.uni-ulm.de/en/in/mi/, website
https://twitter.com/mi_uulm?lang=de, social media},
doi = {10.1145/3613904.3642365},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Automated vehicles are expected to improve traffic safety and efficiency. One approach to achieve this is via platooning, that is, (automated) vehicles can drive behind each other at very close proximity to reduce air resistance. However, this behavior could lead to difficulties in mixed traffic, for example, when manual drivers try to enter a highway. Therefore, we report the results of a within-subject Virtual Reality study (N=29) evaluating different platoon behaviors (single vs. multiple, i.e., four, gaps) and communication strategies (HUD, AR, attached displays). Results show that AR communication reduced mental workload, improved perceived safety, and a single big gap led to the safest merging behavior. Our work helps to incorporate novel behavior enabled by automation into general traffic better.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Just Undo It: Exploring Undo Mechanics in Multi-User Virtual Reality
Julian Rasch (LMU Munich), Florian Perzl (LMU Munich), Yannick Weiss (LMU Munich), Florian Müller (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Rasch2024JustUndo,
title = {Just Undo It: Exploring Undo Mechanics in Multi-User Virtual Reality},
author = {Julian Rasch (LMU Munich), Florian Perzl (LMU Munich), Yannick Weiss (LMU Munich), Florian Müller (LMU Munich)},
url = {http://www.medien.ifi.lmu.de/index.xhtml, website
https://twitter.com/mimuc, social media},
doi = {10.1145/3613904.3642864},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {With the proliferation of VR and a metaverse on the horizon, many multi-user activities are migrating to the VR world, calling for effective collaboration support. As one key feature, traditional collaborative systems provide users with undo mechanics to reverse errors and other unwanted changes. While undo has been extensively researched in this domain and is now considered industry standard, it is strikingly absent for VR systems in research and industry. This work addresses this research gap by exploring different undo techniques for basic object manipulation in different collaboration modes in VR. We conducted a study involving 32 participants organized in teams of two. Here, we studied users' performance and preferences in a tower stacking task, varying the available undo techniques and their mode of collaboration. The results suggest that users desire and use undo in VR and that the choice of the undo technique impacts users' performance and social connection.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Keyboard Fighters: The Use of ICTs by Activists in Times of Military Coup in Myanmar
Laura Guntrum (TU Darmstadt)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Guntrum2024KeyboardFighters,
title = {Keyboard Fighters: The Use of ICTs by Activists in Times of Military Coup in Myanmar},
author = {Laura Guntrum (TU Darmstadt)},
url = {https://www.peasec.de, website},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Amidst the ongoing anti-military protests in Myanmar since 2021, there is a noticeable research gap on ICT-supported activism. Generally, ICTs play an important role during political crises in conjunction with activists’ practices on the ground. Inspired by Resource Mobilization Theory, I conducted qualitative interviews (N=16) and a qualitative online survey (N=34), which demonstrate the intersection between analog and digital domains, showcasing the ingenuity of the activists, and the rapid adoption of ICTs in a country that has experienced a digital revolution within the last few years. As not all people were able to protest on-the-ground, they acted as keyboard fighters to organize protests, to share information, and to support the civil disobedience movement in Myanmar. The study identifies, inter alia, the need for better offline applications with wider coverage in times of internet shutdowns, applications that cannot be easily identified during physical controls, and providing free and secure VPN access.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Language of Zelda: Facilitating Language Learning Practices Using ChatGPT
Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Elisabeth L Fittschen (Human-Computer Interaction, Universität Hamburg), Hande Eyicalis (Human-Computer Interaction, Universität Hamburg), David Kraus (Human-Computer Interaction, Universität Hamburg), Henrik Nickelmann (Human-Computer Interaction, Universität Hamburg), Anna Tomko (Human-Computer Interaction, Universität Hamburg), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)
In: 2024.
Abstract | Tags: Student Game Competition | Links:
@inproceedings{Karaosmanoglu2024LanguageZelda,
title = {Language of Zelda: Facilitating Language Learning Practices Using ChatGPT},
author = {Sukran Karaosmanoglu (Human-Computer Interaction, Universität Hamburg), Elisabeth L Fittschen (Human-Computer Interaction, Universität Hamburg), Hande Eyicalis (Human-Computer Interaction, Universität Hamburg), David Kraus (Human-Computer Interaction, Universität Hamburg), Henrik Nickelmann (Human-Computer Interaction, Universität Hamburg), Anna Tomko (Human-Computer Interaction, Universität Hamburg), Frank Steinicke (Human-Computer Interaction, Universität Hamburg)},
url = {https://www.inf.uni-hamburg.de/en/inst/ab/hci, website
https://twitter.com/uhhhci, social media},
doi = {10.1145/3613905.3648107},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The Language of Zelda is an educational game that re-imagines ``The Legend of Zelda: A Link to the Past'' for French language learning. With the integration of ChatGPT for non-player characters (NPCs), the game allows players to interact with NPCs to practice French through gameplay, puzzles, and quests. Our approach bridges the gap between declarative and procedural language knowledge, offering an engaging, immersive learning experience. The game's adaptive dialogues cater to various proficiency levels, enhancing both education and entertainment values. Our work illustrates the potential of combining AI with game-based learning to create effective, enjoyable language education tools.},
keywords = {Student Game Competition},
pubstate = {published},
tppubtype = {inproceedings}
}
Learning from Cycling: Discovering Lessons Learned from CyclingHCI
Andrii Matviienko (KTH Royal Institute of Technology), Mario Boot (University of Twente), Andreas Löcken (Technische Hochschule Ingolstadt), Bastian Pfleging (TU Bergakademie Freiberg), Markus Löchtefeld (Aalborg University), Tamara von Sawitzky (Technische Hochschule Ingolstadt), Gian-Luca Savino (University of St. Gallen), Miriam Sturdee (University of St. Andrews), Josh Anders (The Australian National University), Kristy Elizabeth Boyer (University of Florida), Stephen Brewster (University of Glasgow), Florian 'Floyd' Mueller (Monash University)
In: 2024.
Abstract | Tags: Workshop | Links:
@inproceedings{Matviienko2024LearningFrom,
title = {Learning from Cycling: Discovering Lessons Learned from CyclingHCI},
author = {Andrii Matviienko (KTH Royal Institute of Technology), Mario Boot (University of Twente), Andreas Löcken (Technische Hochschule Ingolstadt), Bastian Pfleging (TU Bergakademie Freiberg), Markus Löchtefeld (Aalborg University), Tamara von Sawitzky (Technische Hochschule Ingolstadt), Gian-Luca Savino (University of St. Gallen), Miriam Sturdee (University of St. Andrews), Josh Anders (The Australian National University), Kristy Elizabeth Boyer (University of Florida), Stephen Brewster (University of Glasgow), Florian 'Floyd' Mueller (Monash University)},
url = {https://hcig.thi.de/, website},
doi = {10.1145/3613905.3636291},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Cycling plays an essential role in sustainable mobility, health, and socializing. This workshop aims to collect and discuss the lessons learned from Cycling Human-Computer Interaction (CyclingHCI). For this, we will gather researchers and experts in the field to discuss what we learned from designing, building, and evaluating CyclingHCI systems. We will start the workshop with three lessons learned from CyclingHCI defined by the organizers and their experience in the field, which include (1) a lack of theories, tools, and perspectives, (2) knowledge about designing for safety and inclusive cycling, and (3) evaluation methods and environments. Taken together, with this work, we aim to promote interactive technology to get more people cycling, profiting from the many associated benefits.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
Let’s Talk About Death: Existential Conversations with Chatbots
Ruben Albers (University of Siegen), Marc Hassenzahl (University of Siegen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Albers2024LetsTalk,
title = {Let’s Talk About Death: Existential Conversations with Chatbots},
author = {Ruben Albers (University of Siegen), Marc Hassenzahl (University of Siegen)},
url = {http://www.experienceandinteraction.com/, website},
doi = {10.1145/3613904.3642421},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Many people prefer not to think about their own death, let alone talk about it. This contributes to fear of death and reduces the acceptance of its inevitability. We hypothesize that talking about one’s own death with a specially designed chatbot reduces fear of death and strengthens the confidence to discuss the topic further with loved ones. Participants (N=100) talked with the chatbot for an average of 25 minutes. It offered conversations about planning for one's own death, end-of-life preferences, and hopes for the afterlife. We measured participants’ fear and acceptance of death (DAP-R questionnaire) and readiness for end-of-life conversation (REOLC questionnaire) before and after the chat. Overall, attitudes toward death improved and fear decreased, while readiness for end-of-life conversations increased. Bigger changes in attitude corresponded with longer, more reflective responses in the conversations, commitment to plans, finding meaning in death, and some notion of legacy or afterlife.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Listening to the Voices: Describing Ethical Caveats of Conversational User Interfaces According to Experts and Frequent Users
Thomas Mildner (University of Bremen), Orla Cooney (University College Dublin), Anna-Maria Meck (Ludwig Maximilian University of Munich), Marion Bartl (University College Dublin), Gian-Luca Savino (University of St. Gallen), Philip R Doyle (University College Dublin), Diego Garaialde (University College Dublin), Leigh Clark (Bold Insight), John Sloan (Trinity College Dublin), Nina Wenig (University of Bremen), Rainer Malaka (University of Bremen), Jasmin Niess (University of Oslo)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Mildner2024voices,
title = {Listening to the Voices: Describing Ethical Caveats of Conversational User Interfaces According to Experts and Frequent Users},
author = {Thomas Mildner (University of Bremen), Orla Cooney (University College Dublin), Anna-Maria Meck (Ludwig Maximilian University of Munich), Marion Bartl (University College Dublin), Gian-Luca Savino (University of St. Gallen), Philip R Doyle (University College Dublin), Diego Garaialde (University College Dublin), Leigh Clark (Bold Insight), John Sloan (Trinity College Dublin), Nina Wenig (University of Bremen), Rainer Malaka (University of Bremen), Jasmin Niess (University of Oslo)},
url = {https://www.uni-bremen.de/dmlab/, website},
doi = {10.1145/3613904.3642542},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Advances in natural language processing and understanding have led to a rapid growth in the popularity of conversational user interfaces (CUIs). While CUIs introduce novel benefits, they also yield risks that may exploit people's trust. Although research looking at unethical design deployed through graphical user interfaces (GUIs) established a thorough taxonomy of so-called dark patterns, there is a need for an equally in-depth understanding in the context of CUIs. Addressing this gap, we interviewed 27 participants from three cohorts: researchers, practitioners, and frequent users of CUIs. Applying thematic analysis, we develop five themes reflecting each cohort's insights about ethical design challenges and introduce the CUI Expectation Cycle, bridging system capabilities and user expectations while respecting each theme's ethical caveats. This research aims to inform future work to consider ethical constraints while adopting a human-centred approach.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Look Over Here! Comparing Interaction Methods for User-Assisted Remote Scene Reconstruction
Carina Liebers (University of Duisburg-Essen), Niklas Pfützenreuter (Universität Duisburg-Essen), Marvin Prochazka (University of Duisburg-Essen), Pranav Megarajan (OFFIS - Institute for Information Technology), Eike Furuno (OFFIS – Institute for Information Technology), Jan Löber (University of Duisburg-Essen), Tim Claudius Stratmann (OFFIS - Institute for Information Technology), Jonas Auda (University of Duisburg-Essen), Donald Degraen (University of Duisburg-Essen), Uwe Gruenefeld (University of Duisburg-Essen), Stefan Schneegass (University of Duisburg-Essen)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Liebers2024LookOver,
title = {Look Over Here! Comparing Interaction Methods for User-Assisted Remote Scene Reconstruction},
author = {Carina Liebers (University of Duisburg-Essen), Niklas Pfützenreuter (Universität Duisburg-Essen), Marvin Prochazka (University of Duisburg-Essen), Pranav Megarajan (OFFIS - Institute for Information Technology), Eike Furuno (OFFIS – Institute for Information Technology), Jan Löber (University of Duisburg-Essen), Tim Claudius Stratmann (OFFIS - Institute for Information Technology), Jonas Auda (University of Duisburg-Essen), Donald Degraen (University of Duisburg-Essen), Uwe Gruenefeld (University of Duisburg-Essen), Stefan Schneegass (University of Duisburg-Essen)},
doi = {10.1145/3613905.3650982},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Detailed digital representations of physical scenes are key in many cases, such as historical site preservation or hazardous area inspection. To automate the capturing process, robots or drones mounted with sensors can algorithmically record the environment from different viewpoints. However, environmental complexities often lead to incomplete captures. We believe humans can support scene capture as their contextual understanding enables easy identification of missing areas and recording errors. Therefore, they need to perceive the recordings and suggest new sensor poses. In this work, we compare two human-centric approaches in Virtual Reality for scene reconstruction through the teleoperation of a remote robot arm, i.e., directly providing sensor poses (direct method) or specifying missing areas in the scans (indirect method). Our results show that directly providing sensor poses leads to higher efficiency and user experience. In future work, we aim to compare the quality of human assistance to automatic approaches.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Mappings in the Home: Selecting Home Appliances in 3D Space
Oliver Nowak (RWTH Aachen University), Lennart Becker (RWTH Aachen University), Sebastian Pettirsch (RWTH Aachen University), Jan Borchers (RWTH Aachen University)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Nowak2024MappingsIn,
title = {Mappings in the Home: Selecting Home Appliances in 3D Space},
author = {Oliver Nowak (RWTH Aachen University), Lennart Becker (RWTH Aachen University), Sebastian Pettirsch (RWTH Aachen University), Jan Borchers (RWTH Aachen University)},
url = {https://hci.rwth-aachen.de, website
https://youtu.be/7WEvc7sth5M, teaser video},
doi = {10.1145/3613905.3650745},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Unlike voice assistants, remotes, and smartphones, UIs embedded into furniture and other surfaces offer silent, discreet, and unobtrusive control of smart home appliances. However, as the number of appliances grows, fitting individual controls for each onto the surfaces in our environment becomes impractical, making it necessary to select appliances before controlling them. These appliances are placed in 3D at various heights around the room, while traditional controls are laid out in 2D, complicating control-to-target mapping. We compared six UIs using mappings with spatial analogies that are either absolute or relative to the user's position and perspective. Participants used each to select 20 targets in a simplified living room, once while looking and once eyes-free. We investigated performance and participants' ratings for, inter alia, ease of use, mapping comprehensibility, and mental demand. Map-based controllers were most promising, but participants also ranked perspective projection with touch input highly.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Mobilizing Research and Regulatory Action on Dark Patterns and Deceptive Design Practices
Colin M. Gray (Indiana University Bloomington), Johanna T. Gunawan (Northeastern University, Boston), René Schäfer (RWTH Aachen University), Nataliia Bielova (Inria Sophia Antipolis), Lorena Sanchez Chamorro (University of Luxembourg), Katie Seaborn (Tokyo Institute of Technology), Thomas Mildner (University of Bremen), Hauke Sandhaus (Cornell University)
In: 2024.
Abstract | Tags: Workshop | Links:
@inproceedings{Gray2024MobilizingResearch,
title = {Mobilizing Research and Regulatory Action on Dark Patterns and Deceptive Design Practices},
author = {Colin M. Gray (Indiana University Bloomington), Johanna T. Gunawan (Northeastern University, Boston), René Schäfer (RWTH Aachen University), Nataliia Bielova (Inria Sophia Antipolis), Lorena Sanchez Chamorro (University of Luxembourg), Katie Seaborn (Tokyo Institute of Technology), Thomas Mildner (University of Bremen), Hauke Sandhaus (Cornell University)},
url = {https://www.uni-bremen.de/dmlab/, website},
doi = {10.1145/3613905.3636310},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Deceptive, manipulative, and coercive practices are deeply embedded in our digital experiences, impacting our ability to make informed choices and undermining our agency and autonomy. These design practices—collectively known as “dark patterns” or “deceptive patterns”—are increasingly under legal scrutiny and sanctions, largely due to the efforts of human-computer interaction scholars that have conducted pioneering research relating to dark patterns types, definitions, and harms. In this workshop, we continue building this scholarly community with a focus on organizing for action. Our aims include: (i) building capacity around specific research questions relating to methodologies for detection; (ii) characterization of harms; and (iii) creating effective countermeasures. Through the outcomes of the workshop, we will connect our scholarship to the legal, design, and regulatory communities to inform further legislative and legal action.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {inproceedings}
}
More than Task Performance: Developing New Criteria for Successful Human-AI Teaming Using the Cooperative Card Game Hanabi
Christiane Attig (University of Lübeck), Patricia Wollstadt (Honda Research Institute Europe GmbH), Tim Schrills (University of Lübeck), Thomas Franke (University of Lübeck), Christiane Wiebel-Herboth (Honda Research Institute Europe GmbH)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Attig2024MoreThan,
title = {More than Task Performance: Developing New Criteria for Successful Human-AI Teaming Using the Cooperative Card Game Hanabi},
author = {Christiane Attig (University of Lübeck), Patricia Wollstadt (Honda Research Institute Europe GmbH), Tim Schrills (University of Lübeck), Thomas Franke (University of Lübeck), Christiane Wiebel-Herboth (Honda Research Institute Europe GmbH)},
url = {https://www.imis.uni-luebeck.de/en/ingpsy, website},
doi = {10.1145/3613905.3650853},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {As we shift to designing AI agents as teammates rather than tools, the social aspects of human-AI interaction become more pronounced. Consequently, to develop agents that are able to navigate the social dynamics that accompany cooperative teamwork, evaluation criteria that refer only to objective task performance will not be sufficient. We propose perceived cooperativity and teaming perception as subjective metrics for investigating successful human-AI teaming. Corresponding questionnaire scales were developed and tested in a pilot study employing the collaborative card game Hanabi, which has been identified as a unique setting for investigating human-AI teaming. Preliminary descriptive results suggest that rule-based and reinforcement learning-based agents differ in terms of perceived cooperativity and teaming perception. Future work will extend the results in a large user study to psychometrically evaluate the scales and test a conceptual framework that includes further aspects related to social dynamics in human-AI teaming.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Motionless Movement: Towards Vibrotactile Kinesthetic Displays
Yuran Ding (Max Planck Institute for Informatics, University of Maryland College Park), Nihar Sabnis (Max Planck Institute for Informatics), Paul Strohmeier (Max Planck Institute for Informatics)
In: 2024.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{Ding2024MotionlessMovement,
title = {Motionless Movement: Towards Vibrotactile Kinesthetic Displays},
author = {Yuran Ding (Max Planck Institute for Informatics, University of Maryland College Park), Nihar Sabnis (Max Planck Institute for Informatics), Paul Strohmeier (Max Planck Institute for Informatics)},
url = {https://sensint.mpi-inf.mpg.de/, website
https://x.com/sensintgroup?s=21&t=Wwo5g9aG_rw4oGszZiCMfQ, social media},
doi = {10.1145/3613904.3642499},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Beyond visual and auditory displays, tactile displays and grounded force feedback devices have become more common. Other sensory modalities are also catered to by a broad range of display devices, including temperature, taste, and olfaction. However, one sensory modality remains challenging to represent: kinesthesia – the sense of movement. Inspired by grain-based compliance illusions, we investigate how vibrotactile cues can evoke kinesthetic experiences, even when no movement is performed. We examine the effects of vibrotactile mappings and granularity on the magnitude of perceived motion; distance-based mappings provided the greatest sense of movement. Using an implementation that combines visual feedback and our prototype kinesthetic display, we demonstrate that action-coupled vibrotactile cues are significantly better at conveying an embodied sense of movement than the corresponding visual stimulus, and that combining vibrotactile and visual feedback is best. These results point towards a future where kinesthetic displays will be used in rehabilitation, sports, virtual-reality and beyond.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Mouse2Vec: Learning Reusable Semantic Representations of Mouse Behaviour
Guanhua Zhang (University of Stuttgart), Zhiming Hu (University of Stuttgart), Mihai Bâce (KU Leuven), Andreas Bulling (University of Stuttgart)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Zhang2024Mouse2Vec,
title = {Mouse2Vec: Learning Reusable Semantic Representations of Mouse Behaviour},
author = {Guanhua Zhang (University of Stuttgart), Zhiming Hu (University of Stuttgart), Mihai Bâce (KU Leuven), Andreas Bulling (University of Stuttgart)},
url = {https://perceptualui.org/, website
https://fediscience.org/@perceptualui, social media
https://perceptualui.org/publications/zhang24_chi/, project page},
doi = {10.1145/3613904.3642141},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The mouse is a pervasive input device used for a wide range of interactive applications. However, computational modelling of mouse behaviour typically requires time-consuming design and extraction of handcrafted features, or approaches that are application-specific. We instead propose Mouse2Vec – a novel self-supervised method designed to learn semantic representations of mouse behaviour that are reusable across users and applications. Mouse2Vec uses a Transformer-based encoder-decoder architecture, which is specifically geared for mouse data: During pretraining, the encoder learns an embedding of input mouse trajectories while the decoder reconstructs the input and simultaneously detects mouse click events. We show that the representations learned by our method can identify interpretable mouse behaviour clusters and retrieve similar mouse trajectories. We also demonstrate on three sample downstream tasks that the representations can be practically used to augment mouse data for training supervised methods and serve as an effective feature extractor.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Multi-Modal eHMIs: The Relative Impact of Light and Sound in AV-Pedestrian Interaction
Debargha Dey (Cornell Tech), Toros Ufuk Senan (Industrial Design, Eindhoven University of Technology), Bart Hengeveld (Industrial Design, Eindhoven University of Technology), Mark Colley (Institute of Media Informatics, Ulm University, Cornell Tech), Azra Habibovic (Scania CV), Wendy Ju (Cornell Tech)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Dey2024MultimodalEhmis,
title = {Multi-Modal eHMIs: The Relative Impact of Light and Sound in AV-Pedestrian Interaction},
author = {Debargha Dey (Cornell Tech), Toros Ufuk Senan (Industrial Design, Eindhoven University of Technology), Bart Hengeveld (Industrial Design, Eindhoven University of Technology), Mark Colley (Institute of Media Informatics, Ulm University and Cornell Tech), Azra Habibovic (Scania CV), Wendy Ju (Cornell Tech)},
doi = {10.1145/3613904.3642031},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {External Human-Machine Interfaces (eHMIs) have been evaluated to facilitate interactions between Automated Vehicles (AVs) and pedestrians. Most eHMIs are, however, visual/ light-based solutions, and multi-modal eHMIs have received little attention to date. We ran an experimental video study (N = 29) to systematically understand the effect on pedestrian's willingness to cross the road and user preferences of a light-based eHMI (light bar on the bumper) and two sound-based eHMIs (bell sound and droning sound), and combinations thereof. We found no objective change in pedestrians' willingness to cross the road based on the nature of eHMI, although people expressed different subjective preferences for the different ways an eHMI may communicate, and sometimes even strong dislike for multi-modal eHMIs. This shows that the modality of the evaluated eHMI concepts had relatively little impact on their effectiveness. Consequently, this lays an important groundwork for accessibility considerations of future eHMIs, and points towards the insight that provisions can be made for taking user preferences into account without compromising effectiveness.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
My Data, My Choice, My Insights: Women's Requirements when Collecting, Interpreting and Sharing their Personal Health Data
Sophie Grimme (OFFIS – Institute for Information Technology), Susanna Marie Spoerl (OFFIS – Institute for Information Technology), Susanne Boll (University Oldenburg), Marion Koelle (OFFIS – Institute for Information Technology)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Grimme2024DataChoice,
title = {My Data, My Choice, My Insights: Women's Requirements when Collecting, Interpreting and Sharing their Personal Health Data},
author = {Sophie Grimme (OFFIS – Institute for Information Technology), Susanna Marie Spoerl (OFFIS – Institute for Information Technology), Susanne Boll (University Oldenburg), Marion Koelle (OFFIS – Institute for Information Technology)},
url = {https://hci.uni-oldenburg.de/de/, website},
doi = {10.1145/3613904.3642851},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {HCI research has been instrumental in enabling self-directed health tracking. Despite a plethora of devices and data, however, users' views of their own health are often fragmented. This is a problem for women's health, where physical and mental observations and symptoms are strongly intertwined. An integrated view throughout different life stages could help to better understand these connections, facilitate symptom alleviation through life-style changes, and support timely diagnosis: currently, women's health issues often go under-researched and under-diagnosed. To capture the needs and worries of self-directed tracking, interpreting and sharing women's health data, we held workshops with 28 women. Drawing upon feminist methods, we conducted a Reflexive Thematic Analysis to identify six central themes that ground opportunities and challenges for life-long, self-directed tracking of intimate data. These themes inform the design of tools for data collection, analysis and sharing that empower women to better understand their bodies and demand adequate health services.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
pARam: Leveraging Parametric Design in Extended Reality to Support the Personalization of Artifacts for Personal Fabrication
Evgeny Stemasov (Institute of Media Informatics, Ulm University), Simon Demharter (Institute of Media Informatics, Ulm University), Max Rädler (Institute of Media Informatics, Ulm University), Jan Gugenheimer (TU-Darmstadt, Institut Polytechnique de Paris), Enrico Rukzio (Institute of Media Informatics, Ulm University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Stemasov2024Param,
title = {pARam: Leveraging Parametric Design in Extended Reality to Support the Personalization of Artifacts for Personal Fabrication},
author = {Evgeny Stemasov (Institute of Media Informatics, Ulm University), Simon Demharter (Institute of Media Informatics, Ulm University), Max Rädler (Institute of Media Informatics, Ulm University), Jan Gugenheimer (TU-Darmstadt and Institut Polytechnique de Paris), Enrico Rukzio (Institute of Media Informatics, Ulm University)},
url = {https://www.uni-ulm.de/in/mi/hci/, website
https://youtu.be/_mj40ft96tY, teaser video
https://youtu.be/yZcv58nkeVE, full video
https://twitter.com/mi_uulm, social media},
doi = {10.1145/3613904.3642083},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Extended Reality (XR) allows in-situ previewing of designs to be manufactured through Personal Fabrication (PF). These in-situ interactions exhibit advantages for PF, like incorporating the environment into the design process. However, design-for-fabrication in XR often happens through either highly complex 3D-modeling or is reduced to rudimentary adaptations of crowd-sourced models. We present pARam, a tool combining parametric designs (PDs) and XR, enabling in-situ configuration of artifacts for PF. In contrast to modeling- or search-focused approaches, pARam supports customization through embodied and practical inputs (e.g., gestures, recommendations) and evaluation (e.g., lighting estimation) without demanding complex 3D-modeling skills. We implemented pARam for HoloLens 2 and evaluated it (n=20), comparing XR and desktop conditions. Users succeeded in choosing context-related parameters and took their environment into account for their configuration using pARam. We reflect on the prospects and challenges of PDs in XR to streamline complex design methods for PF while retaining suitable expressivity.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Perceived Empathy of Technology Scale (PETS): Measuring Empathy of Systems Toward the User
Matthias Schmidmaier (LMU Munich), Jonathan Rupp (University of Innsbruck), Darina Cvetanova (LMU Munich), Sven Mayer (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{Schmidmaier2024PerceivedEmpathy,
title = {Perceived Empathy of Technology Scale (PETS): Measuring Empathy of Systems Toward the User},
author = {Matthias Schmidmaier (LMU Munich), Jonathan Rupp (University of Innsbruck), Darina Cvetanova (LMU Munich), Sven Mayer (LMU Munich)},
url = {https://www.medien.ifi.lmu.de/, website},
doi = {10.1145/3613904.3642035},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Affective computing improves rapidly, allowing systems to process human emotions. This enables systems such as conversational agents or social robots to show empathy toward users. While there are various established methods to measure the empathy of humans, there is no reliable and validated instrument to quantify the perceived empathy of interactive systems. Thus, we developed the Perceived Empathy of Technology Scale (PETS) to assess and compare how empathic users perceive technology. We followed a standardized multi-phase process of developing and validating scales. In total, we invited 30 experts for item generation, 324 participants for item selection, and 396 additional participants for scale validation. We developed our scale using 22 scenarios with opposing empathy levels, ensuring the scale is universally applicable. This resulted in the PETS, a 10-item, 2-factor scale. The PETS allows designers and researchers to evaluate and compare the perceived empathy of interactive systems rapidly.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Playing with Perspectives and Unveiling the Autoethnographic Kaleidoscope in HCI – A Literature Review of Autoethnographies
Annika Kaltenhauser (University of St. Gallen, St. Gallen, Switzerland), Evropi Stefanidi (University of Bremen, Bremen, Germany), Johannes Schöning (University of St. Gallen, St. Gallen, Switzerland)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Kaltenhauser2024PlayingWith,
title = {Playing with Perspectives and Unveiling the Autoethnographic Kaleidoscope in HCI – A Literature Review of Autoethnographies},
author = {Annika Kaltenhauser (University of St. Gallen, St. Gallen, Switzerland), Evropi Stefanidi (University of Bremen, Bremen, Germany), Johannes Schöning (University of St. Gallen, St. Gallen, Switzerland)},
url = {https://www.uni-bremen.de/en/dmlab, website},
doi = {10.1145/3613904.3642355},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Autoethnography is a valuable methodological approach bridging the gap between personal experiences and academic inquiry, enabling researchers to gain deep insights into various dimensions of technology use and design. While its adoption in Human-Computer Interaction (HCI) continues to grow, a comprehensive investigation of its function and role within HCI research is still lacking. This paper examines the evolving landscape of autoethnographies within HCI over the past two decades through a systematic literature review. We identify prevalent themes, methodologies, and contributions emerging from autoethnographies by analysing a corpus of 31 HCI publications. Furthermore, we detail data collection techniques and analysis methods and describe reporting standards. Our literature review aims to inform future (HCI) researchers, practitioners, and designers. It encourages them to embrace autoethnography's rich opportunities by providing examples across domains (e.g., Embodiment, Health & Wellbeing) to advance our understanding of the complex relationships between humans and technology.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Portobello: Extending Driving Simulation from the Lab to the Road
Fanjun Bu (Cornell Tech), Stacey Li (Cornell Tech), David Goedicke (Cornell Tech), Mark Colley (Institute of Media Informatics, Ulm University, Cornell Tech), Gyanendra Sharma (Woven Planet), Wendy Ju (Cornell Tech)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Bu2024Portobello,
title = {Portobello: Extending Driving Simulation from the Lab to the Road},
author = {Fanjun Bu (Cornell Tech), Stacey Li (Cornell Tech), David Goedicke (Cornell Tech), Mark Colley (Institute of Media Informatics, Ulm University and Cornell Tech), Gyanendra Sharma (Woven Planet), Wendy Ju (Cornell Tech)},
doi = {10.1145/3613904.3642341},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {In automotive user interface design, testing often starts with lab-based driving simulators and migrates toward on-road studies to mitigate risks. Mixed reality (XR) helps translate virtual study designs to the real road to increase ecological validity. However, researchers rarely run the same study in both in-lab and on-road simulators due to the challenges of replicating studies in both physical and virtual worlds. To provide a common infrastructure to port in-lab study designs on-road, we built a platform-portable infrastructure, Portobello, to enable us to run twinned physical-virtual studies. As a proof-of-concept, we extended the on-road simulator XR-OOM with Portobello. We ran a within-subjects, autonomous-vehicle crosswalk cooperation study (N=32) both in-lab and on-road to investigate study design portability and platform-driven influences on study outcomes. To our knowledge, this is the first system that enables the twinning of studies originally designed for in-lab simulators to be carried out in an on-road platform.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Real-World Winds: Micro Challenges to Promote Balance Post Smartphone Overload
Nađa Terzimehić (LMU Munich), Julia Huber (LMU Munich), Sarah Aragon-Hahner (LMU Munich), Sven Mayer (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Terzimehić2024RealWorlsWinds,
title = {Real-World Winds: Micro Challenges to Promote Balance Post Smartphone Overload},
author = {Nađa Terzimehić (LMU Munich), Julia Huber (LMU Munich), Sarah Aragon-Hahner (LMU Munich), Sven Mayer (LMU Munich)},
url = {https://www.medien.ifi.lmu.de/, website},
doi = {10.1145/3613904.3642583},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {We present and evaluate the concept of winds -- micro challenges to be done in the physical world post-smartphone overload, to encourage exiting the digital smartphone tunnel and promote refreshing breaks from the digital realm. Whereas digital detox solutions are unsustainable in everyday life, current everyday interventions such as screen time reminders or app blockers can induce negative feelings in users. We hypothesize that winds, delivered by our mobile app Real-World Wind, promote balance between the user’s physical and digital activities, as well as engagement with the intervention. RWW tracks users’ smartphone use behavior and distributes winds of five categories upon overload pattern detection. We evaluated the effectiveness of RWW in a week-long field study with 25 participants. Our findings show that winds foster a fun and engaging experience, and significantly promote balance between the digital and physical world post-smartphone overload. We discuss implications for future technology overload interventions.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Remembering through Sound: Co-creating Sound-based Mementos with People with Blindness
Minyoung Yoo, William Odom, Arne Berger, Sam Barnett, Sadhbh Kenny, Priscilla Lo, Samein Shamsher, Gillian Russell, Lauren Knight
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Yoo2024RememberingThrough,
title = {Remembering through Sound: Co-creating Sound-based Mementos with People with Blindness},
author = {Minyoung Yoo, William Odom, Arne Berger, Sam Barnett, Sadhbh Kenny, Priscilla Lo, Samein Shamsher, Gillian Russell, and Lauren Knight},
url = {https://www.hs-anhalt.de/hochschule-anhalt/fachbereich-5/uebersicht.html, website
www.arneberger.net, social media},
doi = {10.1145/3613904.3641940},
internal-note = {doi corrected from malformed 10.1145/3613904.203641940 (stray "20" inside the ACM suffix) -- verify against ACM DL},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Sound is a preferred and dominant medium that people with blindness use to capture, share and reflect on meaningful moments in their lives. Within the timeframe of 12 months, we worked with seven people with blindness and two of their sighted loved ones to engage in a multi-stage co-creative design process involving multi- ple steps building toward the final co-design workshop. We report three types of sonic mementos, designed together with the participants, that Encapsulate, Augment and Re-imagine personal audio recordings into more interesting and meaningful sonic memories. Building on these sonic mementos, we critically reflect and describe insights into designing sound that supports personal and social experiences of reminiscence for people with blindness through sound. We propose design opportunities to promote collective remembering between people with blindness and their sighted loved ones and design recommendations for remembering through sound.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
SalChartQA: Question-driven Saliency on Information Visualisations
Yao Wang (University of Stuttgart), Weitian Wang (University of Stuttgart), Abdullah Abdelhafez (German University Cairo), Mayar Elfares (University of Stuttgart), Zhiming Hu (University of Stuttgart), Mihai Bâce (KU Leuven), Andreas Bulling (University of Stuttgart)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Wang2024salchartQA,
title = {SalChartQA: Question-driven Saliency on Information Visualisations},
author = {Yao Wang (University of Stuttgart), Weitian Wang (University of Stuttgart), Abdullah Abdelhafez (German University Cairo), Mayar Elfares (University of Stuttgart), Zhiming Hu (University of Stuttgart), Mihai Bâce (KU Leuven), Andreas Bulling (University of Stuttgart)},
url = {https://perceptualui.org/, website
https://fediscience.org/@perceptualui, social media},
doi = {10.1145/3613904.3642942},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Understanding the link between visual attention and users' information needs when visually exploring information visualisations is under-explored due to a lack of large and diverse datasets to facilitate these analyses. To fill this gap we introduce SalChartQA - a novel crowd-sourced dataset that uses the BubbleView interface to track user attention and a question-answering (QA) paradigm to induce different information needs in users. SalChartQA contains 74,340 answers to 6,000 questions on 3,000 visualisations. Informed by our analyses demonstrating the close correlation between information needs and visual saliency, we propose the first computational method to predict question-driven saliency on visualisations. Our method outperforms state-of-the-art saliency models for several metrics, such as the correlation coefficient and the Kullback-Leibler divergence. These results show the importance of information needs for shaping attentive behaviour and pave the way for new applications, such as task-driven optimisation of visualisations or explainable AI in chart question-answering. },
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Shaping Compliance: Inducing Haptic Illusion of Compliance in Different Shapes with Electrotactile Grains
Arata Jingu (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), Nihar Sabnis (Sensorimotor Interaction, Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbrücken, Germany), Paul Strohmeier (Sensorimotor Interaction, Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbrücken, Germany), Jürgen Steimle (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Jingu2024ShapingCompliance,
title = {Shaping Compliance: Inducing Haptic Illusion of Compliance in Different Shapes with Electrotactile Grains},
author = {Arata Jingu (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany), Nihar Sabnis (Sensorimotor Interaction, Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbrücken, Germany), Paul Strohmeier (Sensorimotor Interaction, Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbrücken, Germany), Jürgen Steimle (Saarland University, Saarland Informatics Campus, Saarbrücken, Germany)},
url = {https://hci.cs.uni-saarland.de, website
https://twitter.com/HCI_Saarland, social media},
doi = {10.1145/3613904.3641907},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Compliance, the degree of displacement under applied force, is pivotal in determining the material perception when touching an object. Vibrotactile actuators can be used for creating grain-based virtual compliance, but they have poor spatial resolution and a limiting rigid form factor. We propose a novel electrotactile compliance illusion that renders grains of electrical pulses on an electrode array in response to finger force changes. We demonstrate its ability to render compliance in distinct shapes through a thin, lightweight, and flexible finger-worn interface. Detailed technical parameters and the implementation of our device are provided. A controlled experiment confirms the technique can (1) create virtual compliance; (2) adjust the compliance magnitude with grain and electrode parameters; and (3) render compliance with specific shapes. In three example applications, we present how this illusion can enhance physical objects, elements in graphical user interfaces, and virtual reality experiences.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Simulating projective Augmented Reality Visualizations in Virtual Reality: Is VR a feasible Environment for medical AR Evaluations?
Laureen Polenz, Fabian Joeres, Christian Hansen, Florian Heinrich
In: Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, Honolulu, HI, USA, 2024, ISBN: 9798400703317.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{10.1145/3613905.3650843,
title = {Simulating projective Augmented Reality Visualizations in Virtual Reality: Is VR a feasible Environment for medical AR Evaluations?},
author = {Laureen Polenz and Fabian Joeres and Christian Hansen and Florian Heinrich},
url = {https://doi.org/10.1145/3613905.3650843},
doi = {10.1145/3613905.3650843},
isbn = {9798400703317},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {Honolulu, HI, USA},
series = {CHI EA '24},
abstract = {Augmented Reality (AR) has demonstrated potential in medical applications, such as enhancing surgical navigation. However, evaluating medical AR visualizations entails high costs and effort to provide suitable hardware solutions. This is particularly crucial in projective AR, as these systems require several error-prone calibration and registration steps. This work investigates the suitability of Virtual Reality (VR) as a cost-effective and controlled study environment for evaluating projective AR visualizations. A virtual twin of a real laboratory environment was created, and a user study comparing two needle navigation visualizations was conducted. The study simulated identical experiments in both AR and VR to assess if similar results would emerge. Our findings indicate that both AR and VR experiments exhibited comparable effects in terms of performance and workload of both needle insertion visualizations. This study serves as a preliminary step in demonstrating the feasibility of using VR as an evaluation environment for projective AR visualizations.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Sitting Posture Recognition and Feedback: A Literature Review
Christian Krauter (University of Stuttgart, Stuttgart, Germany), Katrin Angerbauer (University of Stuttgart, Stuttgart, Germany), Aimée Sousa Calepso (University of Stuttgart, Stuttgart, Germany), Alexander Achberger (University of Stuttgart, Stuttgart, Germany), Sven Mayer (LMU Munich, Munich, Germany), Michael Sedlmair (University of Stuttgart, Stuttgart, Germany)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Krauter2024SittingPosture,
title = {Sitting Posture Recognition and Feedback: A Literature Review},
author = {Christian Krauter (University of Stuttgart, Stuttgart, Germany), Katrin Angerbauer (University of Stuttgart, Stuttgart, Germany), Aimée Sousa Calepso (University of Stuttgart, Stuttgart, Germany), Alexander Achberger (University of Stuttgart, Stuttgart, Germany), Sven Mayer (LMU Munich, Munich, Germany), Michael Sedlmair (University of Stuttgart, Stuttgart, Germany)},
url = {https://visvar.github.io/, website},
doi = {10.1145/3613904.3642657},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Extensive sitting is unhealthy; thus, countermeasures are needed to react to the ongoing trend toward more prolonged sitting. A variety of studies and guidelines have long addressed the question of how we can improve our sitting habits. Nevertheless, sitting time is still increasing. Here, smart devices can provide a general overview of sitting habits for more nuanced feedback on the user’s sitting posture. Based on a literature review (N=223), including publications from engineering, computer science, medical sciences, electronics, and more, our work guides developers of posture systems. There is a large variety of approaches, with pressure-sensing hardware and visual feedback being the most prominent. We found factors like environment, cost, privacy concerns, portability, and accuracy important for deciding hardware and feedback types. Further, one should consider the user’s capabilities, preferences, and tasks. Regarding user studies for sitting posture feedback, there is a need for better comparability and for investigating long-term effects.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Still Not a Lot of Research? Re-Examining HCI Research on Religion and Spirituality
Sara Wolf (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Paula Friedrich (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Jörn Hurtienne (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Wolf2024StillNot,
title = {Still Not a Lot of Research? Re-Examining HCI Research on Religion and Spirituality},
author = {Sara Wolf (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Paula Friedrich (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg), Jörn Hurtienne (Chair of Psychological Ergonomics, Julius-Maximilians-Universität Würzburg)},
url = {https://www.mcm.uni-wuerzburg.de/psyergo/, website},
doi = {10.1145/3613905.3651058},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI EA '24},
abstract = {A decade after Buie and Blythe's review "Spirituality: There's an App for That! (But Not a Lot of Research)", this sequel assesses the evolving landscape of Human-Computer Interaction (HCI) research on religion and spirituality. While the enduring importance of religion and spirituality for humanity and its influence on technology use remains, the last decade has seen transformative shifts catalysed by technological advances and the global impact of the COVID-19 pandemic. This paper explores whether and how HCI research on religion and spirituality has also changed. Providing a snapshot of the current research, we document and reflect on changes in the lines of research with a shift towards community, an increased consideration of religion and spirituality in related areas such as health, education, and society, and the broadening of challenges for HCI research on religion and spirituality.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
The Illusion of Performance: The Effect of Phantom Display Refresh Rates on User Expectations and Reaction Times
Esther Bosch (German Aerospace Centre Braunschweig), Robin Welsch (Aalto University), Tamim Ayach (HU Berlin), Christopher Katins (HU Berlin), Thomas Kosch (HU Berlin)
In: 2024.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{Bosch2024IllusionPerformance,
title = {The Illusion of Performance: The Effect of Phantom Display Refresh Rates on User Expectations and Reaction Times},
author = {Esther Bosch (German Aerospace Centre Braunschweig), Robin Welsch (Aalto University), Tamim Ayach (HU Berlin), Christopher Katins (HU Berlin), Thomas Kosch (HU Berlin)},
url = {hcistudio.org, website},
doi = {10.1145/3613905.3650875},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Extended Abstracts of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI EA '24},
abstract = {User expectations impact the evaluation of new interactive systems. Elevated expectations may enhance the perceived effectiveness of interfaces in user studies, similar to a placebo effect observed in medical studies. To showcase the placebo effect, we executed a user study with 18 participants who conducted a reaction time test with two different computer screen refresh rates. Participants saw a stated screen refresh rate before every condition, which corresponded to the true refresh rate only in half of the conditions and was lower or higher in the other half. Results revealed successful priming, as participants believed in superior or inferior performance based on the narrative despite using the opposite refresh rate. Post-experiment questionnaires confirmed participants still held onto the initial narrative. Interestingly, the objective performance remained unchanged between both refresh rates. We discuss how study narratives can influence subjective measures and suggest strategies to mitigate placebo effects in user-centered study designs.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
The Impact of Avatar Completeness on Embodiment and the Detectability of Hand Redirection in Virtual Reality
Martin Feick (DFKI, Saarland University), André Zenner (Saarland University, DFKI), Simon Seibert (Saarland University), Anthony Tang (Singapore Management University), Antonio Krüger (DFKI, Saarland University)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Feick2024ImpactAvatar,
title = {The Impact of Avatar Completeness on Embodiment and the Detectability of Hand Redirection in Virtual Reality},
author = {Martin Feick (DFKI and Saarland University), André Zenner (Saarland University and DFKI), Simon Seibert (Saarland University), Anthony Tang (Singapore Management University), Antonio Krüger (DFKI and Saarland University)},
url = {https://umtl.cs.uni-saarland.de/, website
https://www.youtube.com/watch?v=KjqzSFaA818, teaser video
https://www.youtube.com/watch?v=BIWzAuJzk9g, full video},
doi = {10.1145/3613904.3641933},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {To enhance interactions in VR, many techniques introduce offsets between the virtual and real-world position of users’ hands. Nevertheless, such hand redirection (HR) techniques are only effective as long as they go unnoticed by users—not disrupting the VR experience. While several studies consider how much unnoticeable redirection can be applied, these focus on mid-air floating hands that are disconnected from users’ bodies. Increasingly, VR avatars are embodied as being directly connected with the user’s body, which provide more visual cue anchoring, and may therefore reduce the unnoticeable redirection threshold. In this work, we studied more complete avatars and their effect on the sense of embodiment and the detectability of HR. We found that higher avatar completeness increases embodiment, and we provide evidence for the absence of practically relevant effects on the detectability of HR.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
The Social Journal: Investigating Technology to Support and Reflect on Social Interactions
Sophia Sakel (LMU Munich), Tabea Blenk, Albrecht Schmidt (LMU Munich), Luke Haliburton (LMU Munich, Munich Center for Machine Learning (MCML))
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Sakel2024SocialJournal,
title = {The Social Journal: Investigating Technology to Support and Reflect on Social Interactions},
author = {Sophia Sakel (LMU Munich), Tabea Blenk, Albrecht Schmidt (LMU Munich), Luke Haliburton (LMU Munich, Munich Center for Machine Learning (MCML))},
url = {https://www.medien.ifi.lmu.de/team/sophia.sakel/, website},
doi = {10.1145/3613904.3642411},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
series = {CHI '24},
abstract = {Social interaction is a crucial part of what it means to be human. Maintaining a healthy social life is strongly tied to positive outcomes for both physical and mental health. While we use personal informatics data to reflect on many aspects of our lives, technology-supported reflection for social interactions is currently underexplored. To address this, we first conducted an online survey (𝑁 =124) to understand how users want to be supported in their social interactions. Based on this, we designed and developed an app for users to track and reflect on their social interactions and deployed it in the wild for two weeks (𝑁 =25). Our results show that users are interested in tracking meaningful in-person interactions that are currently untraced and that an app can effectively support self-reflection on social interaction frequency and social load. We contribute insights and concrete design recommendations for technology-supported reflection for social interaction.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Touching the Moon: Leveraging Passive Haptics, Embodiment and Presence for Operational Assessments in Virtual Reality
Florian Dufresne (Arts et Métiers Institute of Technology), Tommy Nilsson (European Space Agency), Geoffrey Gorisse (Arts et Métiers Institute of Technology), Enrico Guerra (University Duisburg-Essen), André Zenner (Saarland University & DFKI), Olivier Christmann (Arts et Métiers Institute of Technology), Leonie Bensch (Institute for Software Technology - Software for Space Systems, Interactive Visualization), Nikolai Anton Callus (European Space Agency), Aidan Cowley (European Space Agency)
In: 2024.
Abstract | Tags: Best Paper, Full Paper | Links:
@inproceedings{Dufresne2024TouchingMoon,
title = {Touching the {Moon}: Leveraging Passive Haptics, Embodiment and Presence for Operational Assessments in Virtual Reality},
author = {{Florian Dufresne (Arts et Métiers Institute of Technology)} and {Tommy Nilsson (European Space Agency)} and {Geoffrey Gorisse (Arts et Métiers Institute of Technology)} and {Enrico Guerra (University Duisburg-Essen)} and {André Zenner (Saarland University \& DFKI)} and {Olivier Christmann (Arts et Métiers Institute of Technology)} and {Leonie Bensch (Institute for Software Technology - Software for Space Systems and Interactive Visualization)} and {Nikolai Anton Callus (European Space Agency)} and {Aidan Cowley (European Space Agency)}},
url = {https://umtl.cs.uni-saarland.de/, website},
doi = {10.1145/3613904.3642292},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Space agencies are in the process of drawing up carefully thought-out Concepts of Operations (ConOps) for future human missions on the Moon. These are typically assessed and validated through costly and logistically demanding analogue field studies. While interactive simulations in Virtual Reality (VR) offer a comparatively cost-effective alternative, they have faced criticism for lacking the fidelity of real-world deployments. This paper explores the applicability of passive haptic interfaces in bridging the gap between simulated and real-world ConOps assessments. Leveraging passive haptic props (equipment mockup and astronaut gloves), we virtually recreated the Apollo 12 mission procedure and assessed it with experienced astronauts and other space experts. Quantitative and qualitative findings indicate that haptics increased presence and embodiment, thus improving perceived simulation fidelity and validity of user reflections. We conclude by discussing the potential role of passive haptic modalities in facilitating early-stage ConOps assessments for human endeavours on the Moon and beyond.},
keywords = {Best Paper, Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Uncovering and Addressing Blink-Related Challenges in Using Eye Tracking for Interactive Systems
Jesse W. Grootjen (LMU Munich), Henrike Weingärtner (LMU Munich), Sven Mayer (LMU Munich)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Grootjen2024BlinkRelated,
title = {Uncovering and Addressing Blink-Related Challenges in Using Eye Tracking for Interactive Systems},
author = {{Jesse W. Grootjen (LMU Munich)} and {Henrike Weingärtner (LMU Munich)} and {Sven Mayer (LMU Munich)}},
url = {https://www.medien.ifi.lmu.de, website
https://twitter.com/mimuc, twitter},
doi = {10.1145/3613904.3642086},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {While eye tracking is a promising means to understand the user, eye tracking data inherently suffers from missing data due to blinks, which may result in reduced system performance. We conducted a literature review to understand how researchers deal with this issue. We uncovered that researchers often implemented their use-case-specific pipeline to overcome the issue, ranging from ignoring missing data to artificial interpolation. With these first insights, we run a large-scale analysis on 11 publicly available datasets to understand the impact of the various approaches on data quality and accuracy. By this, we highlight the pitfalls in data processing and which methods work best. Based on our results, we provide guidelines for handling eye tracking data for interactive systems. Further, we propose a standard data processing pipeline that allows researchers and practitioners to pre-process and standardize their data efficiently.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Understanding User Acceptance of Electrical Muscle Stimulation in Human-Computer Interaction
Sarah Faltaous (University of Duisburg-Essen), Julie R. Williamson (University of Glasgow), Marion Koelle (OFFIS Institute for Information Technology Oldenburg), Max Pfeiffer (Aldi Sued), Jonas Keppel (University of Duisburg-Essen), Stefan Schneegass (University of Duisburg-Essen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Faltaous2024UnderstandingUser,
title = {Understanding User Acceptance of Electrical Muscle Stimulation in Human-Computer Interaction},
author = {{Sarah Faltaous (University of Duisburg-Essen)} and {Julie R. Williamson (University of Glasgow)} and {Marion Koelle (OFFIS Institute for Information Technology Oldenburg)} and {Max Pfeiffer (Aldi Sued)} and {Jonas Keppel (University of Duisburg-Essen)} and {Stefan Schneegass (University of Duisburg-Essen)}},
url = {https://www.hci.wiwi.uni-due.de/, website
https://youtu.be/YlQeNfLmEMQ, teaser video
https://de.linkedin.com/company/hci-group-essen, social media
https://www.youtube.com/@hciessen692, social media
https://twitter.com/hci_due, social media
https://m.facebook.com/HCIEssen, social media},
doi = {10.1145/3613904.3642585},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Electrical Muscle Stimulation (EMS) has unique capabilities that can manipulate users' actions or perceptions; for example, actuating user movement while walking, changing the perceived texture of food, and guiding movements while learning an instrument. These applications highlight the potential utility of EMS, but these benefits may be lost if users reject EMS. To investigate the users' acceptance of EMS, we conducted an online survey (N=101). We compared eight scenarios, six from HCI research applications and two from the sports and health domain. To gain further insights, we conducted in-depth interviews with a subset of the survey respondents (N=10). The results outline the challenges and potentials of EMS with respect to social and technological acceptance, demonstrating higher acceptance of applications for manipulating action as compared to perception. The interviews exposed safety concerns and user expectations for the design and functionality of future EMS applications.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
vARitouch: Back of the Finger Device for Adding Variable Compliance to Rigid Objects
Gabriela Vega (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus), Valentin Martinez-Missir (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus), Dennis Wittchen (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus), Nihar Sabnis (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus), Audrey Girouard (Carleton University), Karen Anne Cochrane (University of Waterloo), Paul Strohmeier (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Vega2024Varitouch,
title = {{vARitouch}: Back of the Finger Device for Adding Variable Compliance to Rigid Objects},
author = {{Gabriela Vega (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)} and {Valentin Martinez-Missir (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)} and {Dennis Wittchen (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)} and {Nihar Sabnis (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)} and {Audrey Girouard (Carleton University)} and {Karen Anne Cochrane (University of Waterloo)} and {Paul Strohmeier (Sensorimotor Interaction Group, Max Planck Institute for Informatics, Saarland Informatics Campus)}},
url = {https://sensint.mpi-inf.mpg.de/, website},
doi = {10.1145/3613904.3642828},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {We present vARitouch, a back-of-the-finger wearable that can modify the perceived tactile material properties of the uninstrumented world around us: vARitouch can modulate the perceived softness of a rigid object through a vibrotactile compliance illusion. As vARitouch does not cover the fingertip, all-natural tactile properties are preserved. We provide three contributions: (1) We demonstrate the feasibility of the concept through a psychophysics study, showing that virtual compliance can be continuously modulated, and perceived softness can be increased by approximately 30 Shore A levels. (2) A qualitative study indicates the desirability of such a device, showing that a back-of-the-finger haptic device has many attractive qualities. (3) To implement vARitouch, we identify a novel way to measure pressure from the back of the finger by repurposing a pulse oximetry sensor. Based on these contributions, we present the finalized vARitouch system, accompanied by a series of application scenarios.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Virtual Unreality: Augmentation-Oriented Ideation Through Design Cards
Robin Neuhaus (Ubiquitous Design / Experience & Interaction, University of Siegen), Ronda Ringfort-Felner (Ubiquitous Design / Experience & Interaction, University of Siegen), Daniel Courtney (Ubiquitous Design / Experience & Interaction, University of Siegen), Madlen Kneile (Interaction Design for Sustainability and Transformation, University of Siegen), Marc Hassenzahl (Ubiquitous Design / Experience & Interaction, University of Siegen)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Neuhaus2024VirtualUnreality,
title = {Virtual Unreality: Augmentation-Oriented Ideation Through Design Cards},
author = {{Robin Neuhaus (Ubiquitous Design / Experience \& Interaction, University of Siegen)} and {Ronda Ringfort-Felner (Ubiquitous Design / Experience \& Interaction, University of Siegen)} and {Daniel Courtney (Ubiquitous Design / Experience \& Interaction, University of Siegen)} and {Madlen Kneile (Interaction Design for Sustainability and Transformation, University of Siegen)} and {Marc Hassenzahl (Ubiquitous Design / Experience \& Interaction, University of Siegen)}},
url = {http://www.experienceandinteraction.com, website},
doi = {10.1145/3613904.3642364},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {While realism is a common design goal for virtual reality (VR), VR also offers opportunities that are impossible in the real world (e.g., telekinesis). So far, there is no design support to exploit the potential of such “impossible” augmentations, especially for serious applications. We developed a card set and a workshop format, which features 15 opportunities to facilitate the ideation of augmentation-oriented VR. We piloted the method in five workshops with people in the early stages of developing a VR application (N=35). Participants found the cards easy to use and to inspire viable new concepts that differed from earlier ideas. Analysis of the concepts with interaction criticism identified two strategies: (1) augmentations that are only loosely related to the purpose of the application, simply to increase “fun”, and (2) augmentations that are closely related to the core purpose and thereby subtly facilitate its fulfillment. The latter has the greater potential.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Why the Fine, AI? The Effect of Explanation Level on Citizens' Fairness Perception of AI-based Discretion in Public Administrations
Saja Aljuneidi (OFFIS - Institute for Information Technology), Wilko Heuten (OFFIS - Institute for Information Technology), Larbi Abdenebaoui (OFFIS - Institute for Information Technology), Maria Wolters (OFFIS - Institute for Information Technology), Susanne Boll (University of Oldenburg)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Aljuneidi2024WhyFine,
title = {Why the Fine, {AI}? The Effect of Explanation Level on Citizens' Fairness Perception of {AI}-based Discretion in Public Administrations},
author = {{Saja Aljuneidi (OFFIS - Institute for Information Technology)} and {Wilko Heuten (OFFIS - Institute for Information Technology)} and {Larbi Abdenebaoui (OFFIS - Institute for Information Technology)} and {Maria Wolters (OFFIS - Institute for Information Technology)} and {Susanne Boll (University of Oldenburg)}},
url = {https://hci.uni-oldenburg.de/, website
https://cloudstorage.elearning.uni-oldenburg.de/s/oQKKF3ddnnD4qeK, teaser video
https://cloudstorage.elearning.uni-oldenburg.de/s/4z65z59nSHrofdm, full video},
doi = {10.1145/3613904.3642535},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {The integration of Artificial Intelligence into decision-making processes within public administration extends to AI-systems that exercise administrative discretion. This raises fairness concerns among citizens, possibly leading to AI-systems abandonment. Uncertainty persists regarding explanation elements impacting citizens' perception of fairness and technology adoption level. In a video-vignette online-survey (N=847), we investigated the impact of explanation levels on citizens' perceptions of informational fairness, distributive fairness, and system adoption level. We enhanced explanations in three stages: none, factor explanations, culminating in factor importance explanations. We found that more detailed explanations improved informational and distributive fairness perceptions, but did not affect citizens' willingness to reuse the system. Interestingly, citizens with higher AI-literacy expressed greater willingness to adopt the system, regardless of the explanation levels. Qualitative findings revealed that greater human involvement and appeal mechanisms could positively influence citizens' perceptions. Our findings highlight the importance of citizen-centered design of AI-based decision-making in public administration.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Writer-Defined AI Personas for On-Demand Feedback Generation
Karim Benharrak (Department of Computer Science, University of Texas, Austin, Austin, Texas, United States), Tim Zindulka (University of Bayreuth, Bayreuth, Germany), Florian Lehmann (University of Bayreuth, Bayreuth, Germany), Hendrik Heuer (Institute for Information Management Bremen, University of Bremen, Bremen, Germany), Daniel Buschek (University of Bayreuth, Bayreuth, Germany)
In: 2024.
Abstract | Tags: Full Paper | Links:
@inproceedings{Benharrak2024WriterdefinedAi,
title = {Writer-Defined {AI} Personas for On-Demand Feedback Generation},
author = {{Karim Benharrak (Department of Computer Science, University of Texas, Austin, Austin, Texas, United States)} and {Tim Zindulka (University of Bayreuth, Bayreuth, Germany)} and {Florian Lehmann (University of Bayreuth, Bayreuth, Germany)} and {Hendrik Heuer (Institute for Information Management Bremen, University of Bremen, Bremen, Germany)} and {Daniel Buschek (University of Bayreuth, Bayreuth, Germany)}},
url = {https://www.hciai.uni-bayreuth.de/en/index.html, website},
doi = {10.1145/3613904.3642406},
year = {2024},
date = {2024-05-11},
urldate = {2024-05-11},
abstract = {Compelling writing is tailored to its audience. This is challenging, as writers may struggle to empathize with readers, get feedback in time, or gain access to the target group. We propose a concept that generates on-demand feedback, based on writer-defined AI personas of any target audience. We explore this concept with a prototype (using GPT-3.5) in two user studies (N=5 and N=11): Writers appreciated the concept and strategically used personas for getting different perspectives. The feedback was seen as helpful and inspired revisions of text and personas, although it was often verbose and unspecific. We discuss the impact of on-demand feedback, the limited representativity of contemporary AI systems, and further ideas for defining AI personas. This work contributes to the vision of supporting writers with AI by expanding the socio-technical perspective in AI tool design: To empower creators, we also need to keep in mind their relationship to an audience.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}