2020 in Numbers
This year, German labs contribute a total of 138 publications to the 2020 ACM CHI Conference on Human Factors in Computing Systems. At the core are 83 Papers, including 1 Best Paper and 14 Honorable Mentions. In addition, we bring 34 Late-Breaking Works, 5 Demonstrations, 7 organized Workshops & Symposia, 2 Case Studies, 2 Journal Articles, 1 SIG, 1 SIGCHI Outstanding Dissertation Award, and 1 Student Game Competition to CHI. All publications are listed below.
'It’s in my other hand!' - Studying the Interplay of Interaction Techniques and Multi-Tablet Activities
Johannes Zagermann (University of Konstanz), Ulrike Pfeil (University of Konstanz), Philipp von Bauer (University of Konstanz), Daniel Fink (University of Konstanz), Harald Reiterer (University of Konstanz)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{ZagermannStudying,
title = {'It’s in my other hand!' - Studying the Interplay of Interaction Techniques and Multi-Tablet Activities},
author = {Johannes Zagermann (University of Konstanz) and Ulrike Pfeil (University of Konstanz) and Philipp von Bauer (University of Konstanz) and Daniel Fink (University of Konstanz) and Harald Reiterer (University of Konstanz)},
url = {https://youtu.be/_LZsSPP1FM4, Video
https://www.twitter.com/HCIGroupKN, Twitter},
doi = {10.1145/3313831.3376540},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Cross-device interaction with tablets is a popular topic in HCI research. Recent work has shown the benefits of including multiple devices into users’ workflows while various interaction techniques allow transferring content across devices. However, users are only reluctantly using multiple devices in combination. At the same time, research on cross-device interaction struggles to find a frame of reference to compare techniques or systems. In this paper, we try to address these challenges by studying the interplay of interaction techniques, device utilization, and task-specific activities in a user study with 24 participants from different but complementary angles of evaluation using an abstract task, a sensemaking task, and three interaction techniques. We found that different interaction techniques have a lower influence than expected, that work behaviors and device utilization depend on the task at hand, and that participants value specific aspects of cross-device interaction.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
3D-Auth: Two-Factor Authentication with Personalized 3D-Printed Items
Karola Marky (TU Darmstadt), Martin Schmitz (TU Darmstadt), Verena Zimmer (TU Darmstadt), Martin Herbers (TU Darmstadt), Kai Kunze (Keio Media Design), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{Marky3D,
title = {3D-Auth: Two-Factor Authentication with Personalized 3D-Printed Items},
author = {Karola Marky (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Verena Zimmer (TU Darmstadt) and Martin Herbers (TU Darmstadt) and Kai Kunze (Keio Media Design) and Max Mühlhäuser (TU Darmstadt)},
url = {https://youtu.be/_dHihnJTRek, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3313831.3376189},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Two-factor authentication is a widely recommended security mechanism and already offered for different services. However, known methods and physical realizations exhibit considerable usability and customization issues. In this paper, we propose 3D-Auth, a new concept of two-factor authentication. 3D-Auth is based on customizable 3D-printed items that combine two authentication factors in one object. The object bottom contains a uniform grid of conductive dots that are connected to a unique embedded structure inside the item. Based on the interaction with the item, different dots turn into touch-points and form an authentication pattern. This pattern can be recognized by a capacitive touchscreen. Based on an expert design study, we present an interaction space with six categories of possible authentication interactions. In a user study, we demonstrate the feasibility of 3D-Auth items and show that the items are easy to use and the interactions are easy to remember.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
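To illustrate the touch-point-pattern idea described in the 3D-Auth abstract above, here is a minimal sketch; it is not the authors' implementation. It assumes a hypothetical 4x4 dot grid in which an interaction with the printed item activates a subset of conductive dots, and it simply verifies the observed pattern against the pattern registered during enrollment.

# Illustrative sketch only, not the authors' implementation:
# verify a touch-point pattern from a hypothetical 4x4 conductive dot grid
# against the pattern registered for the 3D-printed item during enrollment.

def pattern_from_touch_points(points):
    """Normalize (row, col) dots reported by the capacitive touchscreen."""
    return frozenset(points)

def verify(registered, observed):
    """Accept only an exact match between enrolled and observed pattern."""
    return registered == observed

# Enrollment: pattern produced by the item under a specific interaction.
enrolled = pattern_from_touch_points([(0, 0), (1, 2), (3, 3)])

# Authentication attempt: dots detected when the user performs the interaction.
attempt = pattern_from_touch_points([(0, 0), (1, 2), (3, 3)])

print(verify(enrolled, attempt))  # True -> second factor accepted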
A Conversational Agent to Improve Response Quality in Course Evaluations
Thiemo Wambsganss (University of St.Gallen), Rainer Winkler (University of St.Gallen), Matthias Söllner (University of Kassel / University of St.Gallen), Jan Marco Leimeister (University of St.Gallen / University of Kassel)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{WambsganssConversational,
title = {A Conversational Agent to Improve Response Quality in Course Evaluations},
author = {Thiemo Wambsganss (University of St.Gallen) and Rainer Winkler (University of St.Gallen) and Matthias Söllner (University of Kassel / University of St.Gallen) and Jan Marco Leimeister (University of St.Gallen / University of Kassel)},
doi = {10.1145/3334480.3382805},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
A Human Touch: Social Touch Increases the Perceived Human-likeness of Agents in Virtual Reality
Matthias Hoppe (LMU Munich), Beat Rossmy (LMU Munich), Daniel Peter Neumann (LMU Munich), Stephan Streuber (University of Konstanz), Albrecht Schmidt (LMU Munich), Tonja Machulla (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{HoppeAHumanTouch,
title = {A Human Touch: Social Touch Increases the Perceived Human-likeness of Agents in Virtual Reality},
author = {Matthias Hoppe (LMU Munich) and Beat Rossmy (LMU Munich) and Daniel Peter Neumann (LMU Munich) and Stephan Streuber (University of Konstanz) and Albrecht Schmidt (LMU Munich) and Tonja Machulla (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376719},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual Reality experiences and games present believable virtual environments based on graphical quality, spatial audio, and interactivity. The interaction with in-game characters, controlled by computers (agents) or humans (avatars), is an important part of VR experiences. Pre-captured motion sequences increase the visual humanoid resemblance. However, this still precludes realistic social interactions (eye contact, imitation of body language), particularly for agents. We aim to make social interaction more realistic via social touch. Social touch is non-verbal, conveys feelings and signals (coexistence, closure, intimacy). In our research, we created an artificial hand to apply social touch in a repeatable and controlled fashion to investigate its effect on the perceived human-likeness of avatars and agents. Our results show that social touch is effective to further blur the boundary between computer- and human-controlled virtual characters and contributes to experiences that closely resemble human-to-human interactions.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
A Longitudinal Video Study on Communicating Status and Intent for Self-Driving Vehicle – Pedestrian Interaction
Stefanie M. Faas (Mercedes-Benz AG / Ulm University), Andrea C. Kao (Mercedes-Benz RD NA), Martin Baumann (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{FassLongitudinal,
title = {A Longitudinal Video Study on Communicating Status and Intent for Self-Driving Vehicle – Pedestrian Interaction},
author = {Stefanie M. Faas (Mercedes-Benz AG / Ulm University) and Andrea C. Kao (Mercedes-Benz RD NA) and Martin Baumann (Ulm University)},
doi = {10.1145/3313831.3376484},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {With self-driving vehicles (SDVs), pedestrians cannot rely on communication with the driver anymore. Industry experts and policymakers are proposing an external Human-Machine Interface (eHMI) communicating the automated status. We investigated whether additionally communicating SDVs’ intent to give right of way further improves pedestrians’ street crossing. To evaluate the stability of these eHMI effects, we conducted a three-session video study with N=34 pedestrians where we assessed subjective evaluations and crossing onset times. This is the first work capturing long-term effects of eHMIs. Our findings add credibility to prior studies by showing that eHMI effects last (acceptance, user experience) or even increase (crossing onset, perceived safety, trust, learnability, reliance) with time. We found that pedestrians benefit from an eHMI communicating SDVs’ status, and that additionally communicating SDVs’ intent adds further value. We conclude that SDVs should be equipped with an eHMI communicating both status and intent.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
A View on the Viewer: Gaze-Adaptive Captions for Videos
Kuno Kurzhals (ETH Zürich), Fabian Göbel (ETH Zürich), Katrin Angerbauer (University of Stuttgart), Michael Sedlmair (University of Stuttgart), Martin Raubal (ETH Zürich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KurzhalsView,
title = {A View on the Viewer: Gaze-Adaptive Captions for Videos},
author = {Kuno Kurzhals (ETH Zürich) and Fabian Göbel (ETH Zürich) and Katrin Angerbauer (University of Stuttgart) and Michael Sedlmair (University of Stuttgart) and Martin Raubal (ETH Zürich)},
doi = {10.1145/3313831.3376266},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {"Subtitles play a crucial role in cross-lingual distribution ofmultimedia content and help communicate
information where auditory content is not feasible (loud environments, hearing impairments, unknown languages). Established methods utilize text at the bottom of the screen, which may distract from the video. Alternative techniques place captions closer to relatedcontent (e.g., faces) but are not applicable to arbitrary videos such as documentations. Hence, we propose to leverage live gaze as indirect input method to adapt captions to individual viewing behavior. We implemented two gaze-adaptive methods and compared them in a user study (n=54) to traditional captions and audio-only videos. The results show that viewers with less experience with captions prefer our gaze-adaptive methods as they assist them in reading. Furthermore, gaze distributions resulting from our methods are closer to natural viewing behavior compared to the traditional approach. Based on these results, we provide design implications for gaze-adaptive captions."},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
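As a rough illustration of the gaze-adaptive caption idea summarized in the abstract above (not the method from the paper), the following sketch eases a caption's on-screen position toward the current gaze point while keeping it inside the video frame; all names and parameters are hypothetical.

# Illustrative sketch only (hypothetical names/parameters): move a caption
# toward the viewer's gaze point, clamped so it stays inside the video frame.

FRAME_W, FRAME_H = 1920, 1080
CAPTION_W, CAPTION_H = 600, 80
SMOOTHING = 0.2  # 0 = caption never moves, 1 = caption jumps to the gaze point

def clamp(value, low, high):
    return max(low, min(high, value))

def update_caption_position(caption_xy, gaze_xy):
    """Return a new caption top-left corner, eased toward the gaze point."""
    cx, cy = caption_xy
    gx, gy = gaze_xy
    target_x = gx - CAPTION_W / 2   # center the caption under the gaze point
    target_y = gy + 20              # small offset below the gaze point
    new_x = cx + SMOOTHING * (target_x - cx)
    new_y = cy + SMOOTHING * (target_y - cy)
    return (clamp(new_x, 0, FRAME_W - CAPTION_W),
            clamp(new_y, 0, FRAME_H - CAPTION_H))

position = (660, 980)  # start near the bottom of the frame
position = update_caption_position(position, gaze_xy=(900, 400))
print(position)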
A Virtual Reality Couch Configurator Leveraging Passive Haptic Feedback
André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Jan Ehrlich (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Philip Hell (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Gerrit Kahl (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Christian Murlowski (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Marco Speicher (Deutsche Hochschule für Prävention und Gesundheitsmanagement (DHfPG)), Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Daniel Heinrich (FOM University of Applied Science, Essen), Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{ZennerVirtual,
title = {A Virtual Reality Couch Configurator Leveraging Passive Haptic Feedback},
author = {André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Jan Ehrlich (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Philip Hell (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Gerrit Kahl (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Christian Murlowski (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Marco Speicher (Deutsche Hochschule für Prävention und Gesundheitsmanagement (DHfPG)) and Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Daniel Heinrich (FOM University of Applied Science, Essen) and Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
url = {https://youtu.be/Qh5_bnfIQL8, Video},
doi = {10.1145/3334480.3382953},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {When configuring furniture during sales consultancy in a furniture store, customers are usually confronted with abstract 2D drawings or simplistic renderings of the discussed configuration on a display. We present a novel application based on virtual reality (VR) to support furniture store consultations. Our system allows customers to elaborate different configurations of a couch in dialogue with a sales expert and lets customers experience them through immersive VR in a variety of virtual environments. While the sales-expert can modify the couch layout and fabric, the customer can stay immersed and experience a realistic tactile feeling of the configured couch through passive haptic feedback provided by a sample piece the customer can sit on. A preliminary field study in a furniture store showed that the system is immersive, conveying realistic impressions of the couch configurations. Customers perceived the VR configurator as useful since it would make their purchase decisions easier.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
AL: An Adaptive Learning Support System for Argumentation Skills
Thiemo Wambsganß (University of St.Gallen), Christina Niklaus (University of St.Gallen), Matthias Cetto (University of St. Gallen), Matthias Söllner (University of Kassel & University of St. Gallen), Siegfried Handschuh (University of St. Gallen & University of Passau), Jan Marco Leimeister (University of St. Gallen & Kassel University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{WambsganssAL,
title = {AL: An Adaptive Learning Support System for Argumentation Skills},
author = {Thiemo Wambsganß (University of St.Gallen) and Christina Niklaus (University of St.Gallen) and Matthias Cetto (University of St. Gallen) and Matthias Söllner (University of Kassel & University of St. Gallen) and Siegfried Handschuh (University of St. Gallen & University of Passau) and Jan Marco Leimeister (University of St. Gallen & Kassel University)},
doi = {10.1145/3313831.3376851},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
All Fun and Games: Obtaining Critical Pedestrian Behavior Data from an Online Simulation
Kai Holländer (LMU Munich), Luca Schellenberg (LMU Munich), Changkun Ou (LMU Munich), Andreas Butz (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{HollaenderAll,
title = {All Fun and Games: Obtaining Critical Pedestrian Behavior Data from an Online Simulation},
author = {Kai Holländer (LMU Munich) and Luca Schellenberg (LMU Munich) and Changkun Ou (LMU Munich) and Andreas Butz (LMU Munich)},
doi = {10.1145/3334480.3382797},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Automated cars will need to observe pedestrians and react adequately to their behavior when driving in urban areas. Judging pedestrian behavior, however, is hard. When approaching it by machine learning methods, large amounts of training data is needed, which is costly and difficult to obtain, especially for critical situations. In order to provide such data, we have developed an online game inspired by Frogger, in which players have to cross streets. Accidents and critical situations are a natural part of the data produced in such a way without anybody getting hurt in reality. We present the design of our game and an analysis of the resulting data and its match to real world behavior observed in previous work. We found that behavior patterns in real and virtual environments correlated and argue that game data could be used to train machine learning algorithms for predicting real pedestrians' walking trajectories when crossing a road. This approach could be used in future automated vehicles to increase pedestrian safety.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
All in One! User Perceptions on Centralized IoT Privacy Settings
Karola Marky (TU Darmstadt), Kai Kunze (Keio University), Verena Zimmermann (TU Darmstadt), Alina Stöver (TU Darmstadt), Philipp Hoffmann (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{MarkyAll,
title = {All in One! User Perceptions on Centralized IoT Privacy Settings},
author = {Karola Marky (TU Darmstadt) and Kai Kunze (Keio University) and Verena Zimmermann (TU Darmstadt) and Alina Stöver (TU Darmstadt) and Philipp Hoffmann (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
doi = {10.1145/3334480.3383016},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {IoT devices deliver their functionality by accessing data. Users decide which data they are willing to share via privacy settings interfaces that are typically on the device, or in the app controlling it. Thus, users have to interact with each device or app which is time-consuming and settings might be overlooked. In this paper, we provide a stepping stone into a multi-device interface for adjusting privacy settings. We present three levels of information detail: 1) sensor name 2), sensor name and information about captured data and 3) detailed information on each collected data type including consequences. Through a pre-study with 15 participants, we found that users prefer the access to detailed information because this offers the best decision support. They also wish for a clear status communication, a possibility for rule-based settings, and delegation options.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
AmbiPlant - Ambient Feedback for Digital Media through Actuated Plants
Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Kamila Mushkina (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Akhmajon Makhsadov (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{DegraenAmbiPlant,
title = {AmbiPlant - Ambient Feedback for Digital Media through Actuated Plants},
author = {Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Kamila Mushkina (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Akhmajon Makhsadov (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
url = {https://www.youtube.com/watch?v=HvSRRmgbg58, Video},
doi = {10.1145/3334480.3382860},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {To enhance viewing experiences during digital media consumption, both research and industry have considered ambient feedback effects to visually and physically extend the content presented. In this paper, we present AmbiPlant, a system using support structures for plants as interfaces for providing ambient effects during digital media consumption. In our concept, the media content presented to the viewer is augmented with visual actuation of the plant structures in order to enhance the viewing experience. We report on the results of a user study comparing our AmbiPlant condition to a condition with ambient lighting and a condition without ambient effects. Our system outperformed the "no ambient effects" condition in terms of engagement, entertainment, excitement and innovation and the ambient lighting condition in terms of excitement and innovation.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Assessing 2D and 3D Heatmaps for Comparative Analysis: An Empirical Study
Matthias Kraus (University of Konstanz), Katrin Angerbauer (University of Stuttgart), Juri Buchmüller (University of Konstanz), Daniel Schweitzer (University of Konstanz), Daniel Keim (University of Konstanz), Michael Sedlmair (University of Stuttgart), Johannes Fuchs (University of Konstanz)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KrausAssessing,
title = {Assessing 2D and 3D Heatmaps for Comparative Analysis: An Empirical Study},
author = {Matthias Kraus (University of Konstanz) and Katrin Angerbauer (University of Stuttgart) and Juri Buchmüller (University of Konstanz) and Daniel Schweitzer (University of Konstanz) and Daniel Keim (University of Konstanz) and Michael Sedlmair (University of Stuttgart) and Johannes Fuchs (University of Konstanz)},
url = {https://youtu.be/ybSj8ibu-qA, Video
https://www.twitter.com/dbvis, Twitter},
doi = {10.1145/3313831.3376675},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Heatmaps are a popular visualization technique that encode 2D density distributions using color or brightness. Experimental studies have shown though that both of these visual variables are inaccurate when reading and comparing numeric data values. A potential remedy might be to use 3D heatmaps by introducing height as a third dimension to encode the data. Encoding abstract data in 3D, however, poses many problems, too. To better understand this tradeoff, we conducted an empirical study (N=48) to evaluate the user performance of 2D and 3D heatmaps for comparative analysis tasks. We test our conditions on a conventional 2D screen, but also in a virtual reality environment to allow for real stereoscopic vision. Our main results show that 3D heatmaps are superior in terms of error rate when reading and comparing single data items. However, for overview tasks, the well-established 2D heatmap performs better.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Attention-Aware Brain Computer Interface to Avoid Distractions in Augmented Reality
Lisa-Marie Vortmann (University of Bremen), Felix Putze (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{VortmannAttention,
title = {Attention-Aware Brain Computer Interface to Avoid Distractions in Augmented Reality},
author = {Lisa-Marie Vortmann (University of Bremen) and Felix Putze (University of Bremen)},
doi = {10.1145/3334480.3382889},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmentation Concepts with HUDs for Cyclists to Improve Road Safety in Shared Spaces
Tamara von Sawitzky (Human-Computer Interaction Group, Technische Hochschule Ingolstadt), Philipp Wintersberger (CARISSMA, Technische Hochschule Ingolstadt), Andreas Löcken (Human-Computer Interaction Group, Technische Hochschule Ingolstadt), Anna-Katharina Frison (Human-Computer Interaction Group, Technische Hochschule Ingolstadt), Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{SawitzkyAugmentation,
title = {Augmentation Concepts with HUDs for Cyclists to Improve Road Safety in Shared Spaces},
author = {Tamara von Sawitzky (Human-Computer Interaction Group, Technische Hochschule Ingolstadt) and Philipp Wintersberger (CARISSMA, Technische Hochschule Ingolstadt) and Andreas Löcken (Human-Computer Interaction Group, Technische Hochschule Ingolstadt) and Anna-Katharina Frison (Human-Computer Interaction Group, Technische Hochschule Ingolstadt) and Andreas Riener (Human-Computer Interaction Group, Technische Hochschule Ingolstadt)},
doi = {10.1145/3334480.3383022},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Exploiting the potential of automated vehicles and future traffic concepts like platooning or dynamic intersections requires the integration of human traffic participants. Recent research investigating how automated vehicles can communicate with other road users has focused mainly on pedestrians. We argue that cyclists are another important group of vulnerable road users that must be considered, as cycling is a vital transportation modality for a more sustainable future. Within this paper, we discuss the needs of cyclists and claim that their integration will demand to think of other concepts, which support moving communication partners. We further sketch potential approaches for augmented reality applications based on related work and present results of a pilot study aiming to evaluate and improve those. Initial findings show that people are open towards concepts that increase cyclist safety. However, it is key to present information clearly and unambiguously to produce a benefit.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmented Displays: Seamlessly Extending Interactive Surfaces with Head-Mounted Augmented Reality
Patrick Reipschläger (Technische Universität Dresden), Severin Engert (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Interactivity/Demonstration | Links:
@inproceedings{ReipschlaegerAugmented,
title = {Augmented Displays: Seamlessly Extending Interactive Surfaces with Head-Mounted Augmented Reality},
author = {Patrick Reipschläger (Technische Universität Dresden) and Severin Engert (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden)},
url = {https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3334480.3383138},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present Augmented Displays, a new class of display systems directly combining high-resolution interactive surfaces with head-mounted Augmented Reality. This extends the screen real estate beyond the display and enables placing AR content directly at the display's borders or within the real environment. Furthermore, it enables people to interact with AR objects using natural pen and touch input in high precision on the surface. This combination allows for a variety of interesting applications. To illustrate them, we present two use cases: An immersive 3D modeling tool and an architectural design tool. Our goal is to demonstrate the potential of Augmented Displays as a foundation for future work in the design space of this exciting new class of systems.},
keywords = {Interactivity/Demonstration},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmented Reality for Older Adults: Exploring Acceptability of Virtual Coaches for Home-based Balance Training in an Aging Population
Fariba Mostajeran (Uni Hamburg), Frank Steinicke (Uni Hamburg), Oscar Ariza (Uni Hamburg), Dimitrios Gatsios (University of Ioannina), Dimitrios Fotiadis (University of Ioannina)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MostajeranAugmented,
title = {Augmented Reality for Older Adults: Exploring Acceptability of Virtual Coaches for Home-based Balance Training in an Aging Population},
author = {Fariba Mostajeran (Uni Hamburg) and Frank Steinicke (Uni Hamburg) and Oscar Ariza (Uni Hamburg) and Dimitrios Gatsios (University of Ioannina) and Dimitrios Fotiadis (University of Ioannina)},
doi = {10.1145/3313831.3376565},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Balance training has been shown to be effective in reducing risks of falling, which is a major concern for older adults. Usually, exercise programs are individually prescribed and monitored by physiotherapeutic or medical experts. Unfortunately, supervision and motivation of older adults during home-based exercises cannot be provided on a large scale, in particular, considering an ageing population. Augmented reality (AR) in combination with virtual coaches could provide a reasonable solution to this challenge. We present a first investigation of the acceptance of an AR coaching system for balance training, which can be performed at home. In a human-centered design approach we developed several mock-ups and prototypes, and evaluated them with 76 older adults. The results suggest that older adults find the system encouraging and stimulating. The virtual coach is perceived as an alive, calm, intelligent, and friendly human. However, usability of the entire AR system showed a significant negative correlation with participants' age.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmented Reality to Enable Users in Learning Case Grammar from Their Real-World Interactions
Fiona Draxler (LMU Munich), Audrey Labrie (Polytechnique Montréal), Albrecht Schmidt (LMU Munich), Lewis L. Chuang (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{DraxlerAugmented,
title = {Augmented Reality to Enable Users in Learning Case Grammar from Their Real-World Interactions},
author = {Fiona Draxler (LMU Munich) and Audrey Labrie (Polytechnique Montréal) and Albrecht Schmidt (LMU Munich) and Lewis L. Chuang (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376537},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Augmented Reality (AR) provides a unique opportunity to situate learning content in one's environment. In this work, we investigated how AR could be developed to provide an interactive context-based language learning experience. Specifically, we developed a novel handheld-AR app for learning case grammar by dynamically creating quizzes, based on real-life objects in the learner's surroundings. We compared this to the experience of learning with a non-contextual app that presented the same quizzes with static photographic images. Participants found AR suitable for use in their everyday lives and enjoyed the interactive experience of exploring grammatical relationships in their surroundings. Nonetheless, Bayesian tests provide substantial evidence that the interactive and context-embedded AR app did not improve case grammar skills, vocabulary retention, and usability over the experience with equivalent static images. Based on this, we propose how language learning apps could be designed to combine the benefits of contextual AR and traditional approaches.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Augmented Reality Training for Industrial Assembly Work – Are Projection-based AR Assistive Systems an Appropriate Tool for Assembly Training?
Sebastian Büttner (TU Clausthal / TH OWL), Michael Prilla (TU Clausthal), Carsten Röcker (TH OWL)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{BuettnerAugmented,
title = {Augmented Reality Training for Industrial Assembly Work – Are Projection-based AR Assistive Systems an Appropriate Tool for Assembly Training?},
author = {Sebastian Büttner (TU Clausthal / TH OWL) and Michael Prilla (TU Clausthal) and Carsten Röcker (TH OWL)},
url = {https://www.twitter.com/HCISGroup, Twitter},
doi = {10.1145/3313831.3376720},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {"Augmented Reality (AR) systems are on their way to industrial application, e.g. projection-based AR is used to enhance assembly work. Previous studies showed advantages of the systems in permanent-use scenarios, such as faster assembly times.
In this paper, we investigate whether such systems are suitable for training purposes. Within an experiment, we observed the training with a projection-based AR system over multiple sessions and compared it with a personal training and a paper manual training. Our study shows that projection-based AR systems offer only small benefits in the training scenario. While a systematic mislearning of content is prevented through immediate feedback, our results show that the AR training does not reach the personal training in terms of speed and recall precision after 24 hours. Furthermore, we show that once an assembly task is properly trained, there are no differences in the long-term recall precision, regardless of the training method."},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Authentication Beyond Desktops and Smartphones: Novel Approaches for Smart Devices and Environments
Stefan Schneegass (University of Duisburg-Essen), Florian Alt (Bundeswehr University Munich), Angela Sasse (Ruhr University Bochum), Daniel Vogel (University of Waterloo)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{SchneegassAuthentication,
title = {Authentication Beyond Desktops and Smartphones: Novel Approaches for Smart Devices and Environments},
author = {Stefan Schneegass (University of Duisburg-Essen) and Florian Alt (Bundeswehr University Munich) and Angela Sasse (Ruhr University Bochum) and Daniel Vogel (University of Waterloo)},
url = {https://www.hci.wiwi.uni-due.de/veranstaltungen/chi-2020-workshop-on-authentication-beyond-desktops-and-smartphones/, Workshop Website},
doi = {10.1145/3334480.3375144},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Much of the research on authentication in the past decades focused on developing authentication mechanisms for desktop computers and smartphones with the goal of making them both secure and usable. At the same time, the increasing number of smart devices that are becoming part of our everyday life creates new challenges for authentication, in particular since many of those devices are not designed and developed with authentication in mind. Examples include but are not limited to wearables, AR and VR glasses, devices in smart homes, and public displays. The goal of this workshop is to develop a common understanding of challenges and opportunities smart devices and environments create for secure and usable authentication. Therefore, we will bring together researchers and practitioners from HCI, usable security, and specific application areas (e.g., smart homes, wearables) to develop a research agenda for future approaches to authentication.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
Automated Cars as Living Rooms and Offices: Challenges and Opportunities
Clemens Schartmüller (Technische Hochschule Ingolstadt (THI)), Andreas Riener (Technische Hochschule Ingolstadt (THI)), Orit Shaer (Wellesley College), Shamsi Iqbal (Microsoft Research), Sayan Sarcar (University of Tsukuba), Andrew L. Kun (University of New Hampshire), Linda Ng Boyle (University of Washington)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Other | Links:
@inproceedings{SchartmuellerAutomated,
title = {Automated Cars as Living Rooms and Offices: Challenges and Opportunities},
author = {Clemens Schartmüller (Technische Hochschule Ingolstadt (THI)) and Andreas Riener (Technische Hochschule Ingolstadt (THI)) and Orit Shaer (Wellesley College) and Shamsi Iqbal (Microsoft Research) and Sayan Sarcar (University of Tsukuba) and Andrew L. Kun (University of New Hampshire) and Linda Ng Boyle (University of Washington)},
doi = {10.1145/3334480.3381054},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {With increasing automation of the driving task, cars’ cockpits are transforming towards living spaces rather than pure modalities of transport. The promise of automated vehicles being individual places for relaxation and productivity while on-the-go, however, requires significant research. Not only safety-critical questions, but also issues related to ergonomic design, human factors for interactive systems, and social aspects have to be investigated. This special interests group presents an opportunity for connecting various CHI communities on these problems, which need to be solved under time-pressure, because automated vehicles are coming – whether or not the HCI-related issues are solved.},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}
Automated Usability Evaluation of Virtual Reality Applications
Patrick Harms (University of Göttingen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Other | Links:
@inproceedings{HarmsAutomated,
title = {Automated Usability Evaluation of Virtual Reality Applications},
author = {Patrick Harms (University of Göttingen)},
doi = {10.1145/3301423},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}
Autonomous Vehicle-Pedestrian Interaction Across Cultures: Towards Designing Better External Human Machine Interfaces (eHMIs)
Champika Ranasinghe (University of Twente), Kai Holländer (LMU Munich), Rebecca Currano (Stanford University), David Sirkin (Stanford University), Dylan Moore (Stanford University), Stefan Schneegass (University of Duisburg-Essen), Wendy Ju (Cornell Tech)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{RanasingheAutonomous,
title = {Autonomous Vehicle-Pedestrian Interaction Across Cultures: Towards Designing Better External Human Machine Interfaces (eHMIs)},
author = {Champika Ranasinghe (University of Twente) and Kai Holländer (LMU Munich) and Rebecca Currano (Stanford University) and David Sirkin (Stanford University) and Dylan Moore (Stanford University) and Stefan Schneegass (University of Duisburg-Essen) and Wendy Ju (Cornell Tech)},
doi = {10.1145/3334480.3382957},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Drivers and pedestrians use various culturally-based nonverbal cues such as head movements, hand gestures, and eye contact when crossing roads. With the absence of a human driver, this communication becomes challenging in autonomous vehicle (AV)- pedestrian interaction. External human-machine interfaces (eHMIs) for AV-pedestrian interaction are being developed based on the research conducted mainly in North America and Europe, where the traffic and pedestrian behavior are very structured and follow the rules. In other cultures (e.g., South Asia), this can be very unstructured (e.g., pedestrians spontaneously crossing the road at non-cross walks is not very uncommon). However, research on investigating cross-cultural differences in AV-Pedestrian interaction is scarce. This research focuses on investigating cross-cultural differences in AV-Pedestrian interaction to gain insights useful for designing better eHMIs. This paper details three cross-cultural studies designed for this purpose, and that will be deployed in two different cultural settings: Sri Lanka and Germany.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Becoming a Robot – Overcoming Anthropomorphism with Techno-Mimesis
Judith Dörrenbächer (University of Siegen), Diana Löffler (University of Siegen), Marc Hassenzahl (University of Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{DoerrenbaecherBecoming,
title = {Becoming a Robot – Overcoming Anthropomorphism with Techno-Mimesis},
author = {Judith Dörrenbächer (University of Siegen) and Diana Löffler (University of Siegen) and Marc Hassenzahl (University of Siegen)},
doi = {10.1145/3313831.3376507},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Employing anthropomorphism in physical appearance and behavior is the most widespread strategy for designing social robots. In the present paper, we argue that imitating humans impedes the full exploration of robots’ social abilities. In fact, their very ‘thingness’ (e.g., sensors, rationality) is able to create ‘superpowers’ that go beyond human abilities, such as endless patience. To better identify these special abilities, we develop a performative method called ‘Techno-Mimesis’ and explore it in a series of workshops with robot designers. Specifically, we create ‘prostheses’ to allow designers to transform themselves into their future robot to experience use cases from the robot’s perspective, e.g., ‘seeing’ with a distance sensor rather than with eyes. This imperfect imitation helps designers to experience being human and being robot at the same time, making differences apparent and facilitating the discovery of a number of potential physical, cognitive, and communicational robotic superpowers.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Beep Beep: Building Trust with Sound
Matthias Schmidmaier (LMU Munich), Dominik Maurice Runge (LMU Munich), Heinrich Hußmann (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{SchmidmaierBeep,
title = {Beep Beep: Building Trust with Sound},
author = {Matthias Schmidmaier (LMU Munich) and Dominik Maurice Runge (LMU Munich) and Heinrich Hußmann (LMU Munich)},
doi = {10.1145/3334480.3382848},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Audio is one modality that besides content transmission offers non-verbal cues that influence emotional perception. This allows to increase trust for example in privacy-sensitive systems like digital assistants. In this work we focus on basic audio feedback and explore how parameters like melody, pitch or tempo influence the creation of trust. We refer to related research in trust perception of voice, and evaluate if the derived concepts can be universally applied to simple sound patterns. Our study (n=39) shows significant effects for melody and mode, while tendencies were found for pitch and individual user preferences. We consider our findings to serve as basis for research towards the design of unobtrusive and trustworthy user experiences.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Bot or not? User Perceptions of Player Substitution with Deep Player Behavior Models
Johannes Pfau (University of Bremen), Jan David Smeddinck (Newcastle University), Ioannis Bikas (University of Bremen), Rainer Malaka (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{PfauBot,
title = {Bot or not? User Perceptions of Player Substitution with Deep Player Behavior Models},
author = {Johannes Pfau (University of Bremen) and Jan David Smeddinck (Newcastle University) and Ioannis Bikas (University of Bremen) and Rainer Malaka (University of Bremen)},
url = {https://www.twitter.com/dmlabbremen, Twitter},
doi = {10.1145/3313831.3376223},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Many online games suffer when players drop off due to lost connections or quitting prematurely, which leads to match terminations or game-play imbalances. While rule-based outcome evaluations or substitutions with bots are frequently used to mitigate such disruptions, these techniques are often perceived as unsatisfactory. Deep learning methods have successfully been used in deep player behavior modelling (DPBM) to produce non-player characters or bots which show more complex behavior patterns than those modelled using traditional AI techniques. Motivated by these findings, we present an investigation of the player-perceived awareness, believability and representativeness, when substituting disconnected players with DPBM agents in an online-multiplayer action game. Both quantitative and qualitative outcomes indicate that DPBM agents perform similarly to human players and that players were unable to detect substitutions. In contrast, players were able to detect substitution with agents driven by more traditional heuristics.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
BrainCoDe: Electroencephalography-based Comprehension Detection during Reading and Listening
Christina Schneegass (LMU Munich), Thomas Kosch (LMU Munich), Andrea Baumann (LMU Munich), Marius Rusu (LMU Munich), Mariam Hassib (Bundeswehr University Munich), Heinrich Hussmann (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SchneegassBrainCode,
title = {BrainCoDe: Electroencephalography-based Comprehension Detection during Reading and Listening},
author = {Christina Schneegass (LMU Munich) and Thomas Kosch (LMU Munich) and Andrea Baumann (LMU Munich) and Marius Rusu (LMU Munich) and Mariam Hassib (Bundeswehr University Munich) and Heinrich Hussmann (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376707},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The pervasive availability of media in foreign languages is a rich resource for language learning. However, learners are forced to interrupt media consumption whenever comprehension problems occur. We present BrainCoDe, a method to implicitly detect vocabulary gaps through the evaluation of event-related potentials (ERPs). In a user study (N=16), we evaluate BrainCoDe by investigating differences in ERP amplitudes during listening and reading of known words compared to unknown words. We found significant deviations in N400 amplitudes during reading and in N100 amplitudes during listening when encountering unknown words. To evaluate the feasibility of ERPs for real-time applications, we trained a classifier that detects vocabulary gaps with an accuracy of 87.13% for reading and 82.64% for listening, identifying eight out of ten words correctly as known or unknown. We show the potential of BrainCoDe to support media learning through instant translations or by generating personalized learning content.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
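The BrainCoDe abstract above reports that amplitude deviations in the N400 (reading) and N100 (listening) windows signal unknown words. The sketch below is purely illustrative and not the study's pipeline: it assumes a single EEG channel, a 250 Hz sampling rate, a 300-500 ms N400 window, and a hypothetical amplitude threshold, and flags a word as potentially unknown from the mean amplitude in that window.

# Illustrative sketch only (assumed sampling rate, window, and threshold):
# compute the mean amplitude in an N400 window after word onset and flag a
# word as potentially unknown if the amplitude is strongly negative.
import numpy as np

SAMPLING_RATE = 250          # Hz, assumed
WINDOW = (0.300, 0.500)      # seconds after stimulus onset (N400 window)
THRESHOLD_UV = -4.0          # hypothetical decision threshold in microvolts

def n400_mean_amplitude(epoch, onset_idx):
    """Mean amplitude of one EEG channel in the N400 window of an epoch."""
    start = onset_idx + int(WINDOW[0] * SAMPLING_RATE)
    stop = onset_idx + int(WINDOW[1] * SAMPLING_RATE)
    return float(np.mean(epoch[start:stop]))

def looks_unknown(epoch, onset_idx):
    """Very crude rule: a strongly negative N400 suggests an unknown word."""
    return n400_mean_amplitude(epoch, onset_idx) < THRESHOLD_UV

rng = np.random.default_rng(0)
epoch = rng.normal(0.0, 2.0, size=2 * SAMPLING_RATE)  # 2 s of simulated EEG
print(looks_unknown(epoch, onset_idx=100))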
Breaking The Experience: Effects of Questionnaires in VR User Studies
Susanne Putze (University of Bremen), Dmitry Alexandrovsky (University of Bremen), Felix Putze (University of Bremen), Sebastian Höffner (University of Bremen), Jan David Smeddinck (Newcastle University), Rainer Malaka (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{PutzeBreaking,
title = {Breaking The Experience: Effects of Questionnaires in VR User Studies},
author = {Susanne Putze (University of Bremen) and Dmitry Alexandrovsky (University of Bremen) and Felix Putze (University of Bremen) and Sebastian Höffner (University of Bremen) and Jan David Smeddinck (Newcastle University) and Rainer Malaka (University of Bremen)},
url = {https://www.youtube.com/watch?v=iHdW3nphCZQ, Video
https://www.twitter.com/dmlabbremen, Twitter},
doi = {10.1145/3313831.3376144},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Questionnaires are among the most common research tools in virtual reality (VR) evaluations and user studies. However, transitioning from virtual worlds to the physical world to respond to VR experience questionnaires can potentially lead to systematic biases. Administering questionnaires in VR (inVRQs) is becoming more common in contemporary research. This is based on the intuitive notion that inVRQs may ease participation, reduce the Break in Presence (BIP) and avoid biases. In this paper, we perform a systematic investigation into the effects of interrupting the VR experience through questionnaires using physiological data as a continuous and objective measure of presence. In a user study (n=50), we evaluated question-asking procedures using a VR shooter with two different levels of immersion. The users rated their player experience with a questionnaire either inside or outside of VR. Our results indicate a reduced BIP for the employed inVRQ without affecting the self-reported player experience.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Capturing Experts' Mental Models to Organize a Collection of Haptic Devices: Affordances Outweigh Attributes
Hasti Seifi (Max Planck Institute for Intelligent Systems), Michael Oppermann (University of British Columbia), Julia Bullard (University of British Columbia), Karon MacLean (University of British Columbia), Katherine Kuchenbecker (Max Planck Institute for Intelligent Systems)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{SeifiCapturing,
title = {Capturing Experts' Mental Models to Organize a Collection of Haptic Devices: Affordances Outweigh Attributes},
author = {Hasti Seifi (Max Planck Institute for Intelligent Systems) and Michael Oppermann (University of British Columbia) and Julia Bullard (University of British Columbia) and Karon MacLean (University of British Columbia) and Katherine Kuchenbecker (Max Planck Institute for Intelligent Systems)},
doi = {10.1145/3313831.3376395},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Co-Design Futures for AI and Space: A Workbook Sprint
Henrik Mucha (Fraunhofer IOSB), Ricarda Jacobi (Technische Hochschule Ostwestfalen-Lippe), Kirsten Meyer (Technische Hochschule Ostwestfalen-Lippe), Dennis Mevißen (Fraunhofer IOSB), Sebastian Robert (Fraunhofer IOSB), Winfried Heusler (Schüco International KG), Daniel Arztmann (Schüco International KG)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Case Study | Links:
@inproceedings{MuchaCo,
title = {Co-Design Futures for AI and Space: A Workbook Sprint},
author = {Henrik Mucha (Fraunhofer IOSB) and Ricarda Jacobi (Technische Hochschule Ostwestfalen-Lippe) and Kirsten Meyer (Technische Hochschule Ostwestfalen-Lippe) and Dennis Mevißen (Fraunhofer IOSB) and Sebastian Robert (Fraunhofer IOSB) and Winfried Heusler (Schüco International KG) and Daniel Arztmann (Schüco International KG)},
doi = {10.1145/3334480.3375203},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Artificial Intelligence (AI) is continuously moving into our surroundings. In its various forms, it has the potential to disrupt most aspects of human life. Yet, the discourse around AI has long been led by experts, for experts. In this paper, we argue for a participatory approach towards designing human-AI interactions. We outline how we used design methodology to organise an interdisciplinary workshop with a diverse group of students – a workbook sprint with 45 participants from four different programs and 13 countries – to develop speculative design futures in five focus areas. We then provide insights into our findings and share our lessons learned regarding our workshop topic – AI and Space – our process, and our research. We learned that involving non-experts in complex technical discourses – such as AI – through the structural rigour of design methodology is a viable approach. We then conclude by laying out how others might use our findings and initiate their own workbook sprint to explore complex technologies in a human-centred way.},
keywords = {Case Study},
pubstate = {published},
tppubtype = {inproceedings}
}
Combining Embedded Computation and Image Tracking for Composing Tangible Augmented Reality
Tim Düwel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Nico Herbig (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Denise Kahl (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{DuewelCombining,
title = {Combining Embedded Computation and Image Tracking for Composing Tangible Augmented Reality},
author = {Tim Düwel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Nico Herbig (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Denise Kahl (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
doi = {10.1145/3334480.3383043},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {This work proposes a combination of embedded computation and marker tracking to provide more robust augmentations for composed objects in Tangible Augmented Reality. By integrating conductive elements into the tangibles’ sides, communication between embedded microprocessors is enabled, such that a connected composition can be computed without relying on any marker tracking information. Consequently, the virtual counterparts of the tangibles can be aligned, and this virtual composition can be attached to a single marker as a whole, increasing the tracking robustness towards occlusions and perspective distortions. A technical evaluation shows that this approach provides more robust augmentations if a tangible block in a composition is occluded by at least 50% or perspectively distorted by at least 40 to 50 degrees, depending on the block’s size. Additionally, a test with users relying on the use case of a couch configuration tool shows promising results regarding usability and user experience.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Demo of PolySense: How to Make Electrically Functional Textiles
Paul Strohmeier (Saarland University, SIC), Cedric Honnet (MIT Media Lab), Hannah Perner-Wilson (Kobakant), Marc Teyssier (Télécom Paris), Bruno Fruchard (Saarland University, SIC), Ana C. Baptista (CENIMAT/I3N), Jürgen Steimle (Saarland University, SIC)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Interactivity/Demonstration | Links:
@inproceedings{StrohmeierDemo,
title = {Demo of PolySense: How to Make Electrically Functional Textiles},
author = {Paul Strohmeier (Saarland University, SIC) and Cedric Honnet (MIT Media Lab) and Hannah Perner-Wilson (Kobakant) and Marc Teyssier (Télécom Paris) and Bruno Fruchard (Saarland University, SIC) and Ana C. Baptista (CENIMAT/I3N) and Jürgen Steimle (Saarland University, SIC)},
doi = {10.1145/3334480.33},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We demonstrate a simple and accessible method for enhancing textiles with custom piezo-resistive properties. Based on in-situ polymerization, our method offers seamless integration at the material level, preserving a textile's haptic and mechanical properties. We demonstrate how to enhance a wide set of fabrics and yarns using only readily available tools. During each demo session, conference attendees may bring textile samples which will be polymerized in a shared batch. Attendees may keep these samples. While the polymerization is happening, attendees can inspect pre-made samples and explore how these might be integrated in functional circuits. Examples of objects created using polymerization include rapidly manufactured on-body interfaces, tie-dyed motion-capture clothing, and zippers that act as potentiometers.},
keywords = {Interactivity/Demonstration},
pubstate = {published},
tppubtype = {inproceedings}
}
Demonstrating Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes.
Konstantin Klamka (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden), Jürgen Steimle (Saarland University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Interactivity/Demonstration | Links:
@inproceedings{KlamkaDemonstrating,
title = {Demonstrating Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes.},
author = {Konstantin Klamka (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden) and Jürgen Steimle (Saarland University)},
url = {https://youtu.be/FyPcMLBXIm0, Video
https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3334480.3383139},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Rapid prototyping of interactive textiles is still challenging, since manual skills, several processing steps, and expert knowledge are involved. We demonstrate Rapid Iron-On User Interfaces, a novel fabrication approach for empowering designers and makers to enhance fabrics with interactive functionalities. It builds on heat-activated adhesive materials consisting of smart textiles and printed electronics, which can be flexibly ironed onto the fabric to create custom interface functionality. To support rapid fabrication in a sketching-like fashion, we developed a handheld dispenser tool for directly applying continuous functional tapes of desired length as well as discrete patches. We demonstrate versatile composition techniques that allow creating complex circuits, utilizing commodity textile accessories, and sketching custom-shaped I/O modules. We further provide a comprehensive library of components for input, output, wiring and computing. Three example applications demonstrate the functionality, versatility and potential of this approach.},
keywords = {Interactivity/Demonstration},
pubstate = {published},
tppubtype = {inproceedings}
}
Demonstration of Drag:on – A VR Controller Providing Haptic Feedback Based on Drag and Weight Shift
André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Interactivity/Demonstration | Links:
@inproceedings{ZennerDemonstration,
title = {Demonstration of Drag:on – A VR Controller Providing Haptic Feedback Based on Drag and Weight Shift},
author = {André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
url = {https://youtu.be/kiNHqsaoJxc, Video},
doi = {10.1145/3334480.3383145},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {While standard VR controllers lack means to convey realistic, kinesthetic impressions of size, resistance or inertia, this demonstration presents Drag:on, an ungrounded shape-changing interaction device that provides dynamic passive haptic feedback based on drag, i.e. air resistance, and weight shift. Drag:on leverages the airflow at the controller during interaction. The device adjusts its surface area to change the drag and rotational inertia felt by the user. When rotated or swung, Drag:on conveys an impression of resistance, which we previously used in a VR user study to increase the haptic realism of virtual objects and interactions compared to standard controllers. Drag:on's feedback is suitable for rendering virtual mechanical resistances, virtual gas streams, and virtual objects differing in scale, material and fill state. In our demonstration, participants learn about this novel feedback concept, the implementation of our prototype and can experience the resistance feedback during a hands-on session.},
keywords = {Interactivity/Demonstration},
pubstate = {published},
tppubtype = {inproceedings}
}
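For readers unfamiliar with why a larger exposed surface area translates into stronger felt resistance, the standard aerodynamic drag relation makes the dependence explicit. The snippet below is only an illustrative aside, not code from the project, and the numeric values are made up.

def drag_force(air_density, speed, drag_coefficient, area):
    # Standard drag equation F = 0.5 * rho * v^2 * C_d * A.
    # For a fixed swing speed, the felt resistance scales with the exposed area,
    # which is the quantity Drag:on varies by unfolding its surfaces.
    return 0.5 * air_density * speed ** 2 * drag_coefficient * area

# Example (illustrative values): doubling the exposed area doubles the drag.
print(drag_force(1.2, 2.0, 1.1, 0.05), drag_force(1.2, 2.0, 1.1, 0.10))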
Designing Safety Critical Interactions: Hunting Down Human Error
Susanne Boll (University of Oldenburg), Philippe Palanque (Université Paul Sabatier - Toulouse III), Alexander G. Mirnig (University of Salzburg), Jessica Cauchard (Ben Gurion University of the Negev), Margareta Holtensdotter Lützhöft (Western Norway University of Applied Sciences), Michael S. Feary (NASA Ames Research Center)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{BollDesigning,
title = {Designing Safety Critical Interactions: Hunting Down Human Error},
author = {Susanne Boll (University of Oldenburg) and Philippe Palanque (Université Paul Sabatier - Toulouse III) and Alexander G. Mirnig (University of Salzburg) and Jessica Cauchard (Ben Gurion University of the Negev) and Margareta Holtensdotter Lützhöft (Western Norway University of Applied Sciences) and Michael S. Feary (NASA Ames Research Center) },
doi = {10.1145/3334480.3375148},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Almost any presentation dealing with severe incidents with safety-critical systems contains some slides about human errors made at operation time (i.e., when the system is in use) that have caused severe incidents or accidents. In many domains, a multitude of devices and machines from different brands in different generations have been crammed together, which we now call a command and control interface. The bridging of functions across devices, the decision making, the overview, the handling of partially imprecise or conflicting information are often just offloaded to the human. Thus, there appears to be a need to shift the attention from avoiding human error (at operation time) to avoiding design error. In this workshop, we aim to provide a forum to discuss such a paradigm shift and its implications for the methods and tools for designing and evaluating HCI technology in safety-critical environments.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
Developing a Personality Model for Speech-based Conversational Agents Using the Psycholexical Approach
Sarah Theres Völkel (LMU Munich), Ramona Schödel (LMU Munich), Daniel Buschek (University of Bayreuth), Clemens Stachl (Stanford University), Verena Winterhalter (LMU Munich), Markus Bühner (LMU Munich), Heinrich Hussmann (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{VoelkelDeveloping,
title = {Developing a Personality Model for Speech-based Conversational Agents Using the Psycholexical Approach},
author = {Sarah Theres Völkel (LMU Munich) and Ramona Schödel (LMU Munich) and Daniel Buschek (University of Bayreuth) and Clemens Stachl (Stanford University) and Verena Winterhalter (LMU Munich) and Markus Bühner (LMU Munich) and Heinrich Hussmann (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376210},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present the first systematic analysis of personality dimensions developed specifically to describe the personality of speech-based conversational agents. Following the psycholexical approach from psychology, we first report on a new multi-method approach to collect potentially descriptive adjectives from 1) a free description task in an online survey (228 unique descriptors), 2) an interaction task in the lab (176 unique descriptors), and 3) a text analysis of 30,000 online reviews of conversational agents (Alexa, Google Assistant, Cortana) (383 unique descriptors). We aggregate the results into a set of 349 adjectives, which are then rated by 744 people in an online survey. A factor analysis reveals that the commonly used Big Five model for human personality does not adequately describe agent personality. As an initial step to developing a personality model, we propose alternative dimensions and discuss implications for the design of agent personalities, personality-aware personalisation, and future research.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Dynamics of Aimed Mid-air Movements
Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{BachynskyiDynamics,
title = {Dynamics of Aimed Mid-air Movements},
author = {Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
doi = {10.1145/3313831.3376194},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
EmpathicGPS: Exploring the Role of Voice Tonality in Navigation Systems during Simulated Driving
Sebastian Zepf (Mercedes-Benz AG), Neska El Haouij (Massachusetts Institute of Technology), Wolfgang Minker (Ulm University), Javier Hernandez (Massachusetts Institute of Technology), Rosalind Picard (Massachusetts Institute of Technology)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{ZepfEmpathicGPS,
title = {EmpathicGPS: Exploring the Role of Voice Tonality in Navigation Systems during Simulated Driving},
author = {Sebastian Zepf (Mercedes-Benz AG) and Neska El Haouij (Massachusetts Institute of Technology) and Wolfgang Minker (Ulm University) and Javier Hernandez (Massachusetts Institute of Technology) and Rosalind Picard (Massachusetts Institute of Technology)},
doi = {10.1145/3334480.3382935},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Enemy Within: Long-term Motivation Effects of Deep Player Behavior Models for Dynamic Difficulty Adjustment
Johannes Pfau (University of Bremen), Jan David Smeddinck (Newcastle University), Rainer Malaka (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{PfauEnemy,
title = {Enemy Within: Long-term Motivation Effects of Deep Player Behavior Models for Dynamic Difficulty Adjustment},
author = {Johannes Pfau (University of Bremen) and Jan David Smeddinck (Newcastle University) and Rainer Malaka (University of Bremen)},
url = {https://www.youtube.com/watch?v=QOdFmvQnPJQ, Video
https://www.twitter.com/dmlabbremen, Twitter},
doi = {10.1145/3313831.3376423},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Balancing games and producing content that remains interesting and challenging is a major cost factor in the design and maintenance of games. Dynamic difficulty adjustment (DDA) can successfully tune challenge levels to player abilities, but when implemented with classic heuristic parameter tuning (HPT) often turns out to be very noticeable, e.g. as “rubber-banding”. Deep learning techniques can be employed for deep player behavior modeling (DPBM), enabling more complex adaptivity, but effects over frequent and longer-lasting game engagements, as well as comparisons to HPT have not been empirically investigated. We present a situated study of the effects of DDA via DPBM as compared to HPT on intrinsic motivation, perceived challenge and player motivation in a real-world MMORPG. The results indicate that DPBM can lead to significant improvements in intrinsic motivation and players prefer game experience episodes featuring DPBM over experience episodes with classic difficulty management.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
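To make the contrast in the abstract above concrete: classic heuristic parameter tuning typically nudges a difficulty parameter back toward a target success rate, which players can perceive as "rubber-banding". The toy loop below only illustrates that heuristic idea; it is not the paper's implementation, and the parameter names and values are assumptions. A deep player behavior model instead learns to predict the player's actions and adapts opponent behavior from those predictions, which, according to the abstract, players preferred and found more motivating.

def adjust_difficulty(difficulty, recent_win_rate, target=0.5, step=0.05):
    # Toy heuristic parameter tuning ("rubber-banding"): raise difficulty when
    # the player wins too often, lower it otherwise; clamp to [0, 1].
    if recent_win_rate > target:
        difficulty += step
    elif recent_win_rate < target:
        difficulty -= step
    return max(0.0, min(1.0, difficulty))

# Example: a player winning 80% of recent encounters gets a slightly harder game.
print(adjust_difficulty(0.4, recent_win_rate=0.8))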
Evaluation of a Financial Portfolio Visualization using Computer Displays and Mixed Reality Devices with Domain Experts
Kay Schroeder (Zuyd University of Applied Sciences), Batoul Ajdadilish (Zuyd University of Applied Sciences), Alexander P. Henkel (Zuyd University of Applied Sciences), André Calero Valdez (RWTH Aachen University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SchroederEvaluation,
title = {Evaluation of a Financial Portfolio Visualization using Computer Displays and Mixed Reality Devices with Domain Experts},
author = {Kay Schroeder (Zuyd University of Applied Sciences) and Batoul Ajdadilish (Zuyd University of Applied Sciences) and Alexander P. Henkel (Zuyd University of Applied Sciences) and André Calero Valdez (RWTH Aachen University)},
doi = {10.1145/3313831.3376556},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {With the advent of mixed reality devices such as the Microsoft HoloLens, developers have been faced with the challenge to utilize the third dimension in information visualization effectively. Research on stereoscopic devices has shown that three-dimensional representation can improve accuracy in specific tasks (e.g., network visualization). Yet, so far the field has remained mute on the underlying mechanism. Our study systematically investigates the differences in user perception between a regular monitor and a mixed reality device. In a real-life within-subject experiment in the field with twenty-eight investment bankers, we assessed subjective and objective task performance with two- and three-dimensional systems, respectively. We tested accuracy with regard to position, size, and color using single and combined tasks. Our results do not show a significant difference in accuracy between mixed-reality and standard 2D monitor visualizations.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Evaluation of Natural User Interfaces in the Creative Industries
Georg Volkmar (University of Bremen), Thomas Muender (University of Bremen), Dirk Wenig (University of Bremen), Rainer Malaka (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Case Study | Links:
@inproceedings{VolkmarEvaluation,
title = {Evaluation of Natural User Interfaces in the Creative Industries},
author = {Georg Volkmar (University of Bremen) and Thomas Muender (University of Bremen) and Dirk Wenig (University of Bremen) and Rainer Malaka (University of Bremen)},
url = {https://www.youtube.com/watch?v=Mb7-SsxVd6k, Video
https://www.twitter.com/dmlabbremen, Twitter},
doi = {10.1145/3334480.3375201},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The case study presented in this paper is concerned with the applicability of natural user interfaces (NUI) in the context of previsualization (previs). For this purpose, we have developed a virtual reality (VR) based tool that includes NUIs as a novel way to perform previs-related tasks. For the application domains of animation, film, and theater, we conducted a quantitative and qualitative assessment of the prototype by realising projects that resembled real life productions in the creative industries. In collaboration with industry experts with different creative backgrounds, we conducted a large-scale evaluation and examined the potential of NUIs in a professional work context. Our results indicate that NUIs can offer a usable alternative to standard 3D design software, requiring only a short familiarization phase instead of extensive training to achieve the intended outcome. },
keywords = {Case Study},
pubstate = {published},
tppubtype = {inproceedings}
}
Everyday Proxy Objects for Virtual Reality
Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Frank Steinicke (Universität Hamburg), Oscar Javier Ariza Núñez (Universität Hamburg), Adalberto L. Simeone (KU Leuven)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{DaiberEveryday,
title = {Everyday Proxy Objects for Virtual Reality},
author = {Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Donald Degraen (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Frank Steinicke (Universität Hamburg) and Oscar Javier Ariza Núñez (Universität Hamburg) and Adalberto L. Simeone (KU Leuven)},
url = {http://epo4vr.dfki.de/, Workshop Website
https://www.facebook.com/epo4vr, Facebook},
doi = {10.1145/3334480.3375165},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Immersive virtual experiences are becoming ubiquitous in our daily lives. Besides visual and auditory feedback, other senses like haptics, smell and taste can enhance immersion in virtual environments. Most solutions presented in the past require specialized hardware to provide appropriate feedback. To mitigate this need, researchers conceptualized approaches leveraging everyday physical objects as proxies instead. Transferring these approaches to varying physical environments and conditions, however, poses significant challenges to a variety of disciplines such as HCI, VR, haptics, tracking, perceptual science, design, etc. This workshop will explore the integration of everyday items for multi-sensory feedback in virtual experiences and set the course for future research endeavors. Since the community still seems to lack a cohesive agenda for advancing this domain, the goal of this workshop is to bring together individuals interested in everyday proxy objects to review past work, build a unifying research agenda, share ongoing work, and encourage collaboration.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
Examining Design Choices of Questionnaires in VR User Studies
Dmitry Alexandrovsky (University of Bremen), Susanne Putze (University of Bremen), Michael Bonfert (University of Bremen), Sebastian Höffner (University of Bremen), Pitt Michelmann (University of Bremen), Dirk Wenig (University of Bremen), Rainer Malaka (University of Bremen), Jan David Smeddinck (Newcastle University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{AlexandrovskyExamining,
title = {Examining Design Choices of Questionnaires in VR User Studies},
author = {Dmitry Alexandrovsky (University of Bremen) and Susanne Putze (University of Bremen) and Michael Bonfert (University of Bremen) and Sebastian Höffner (University of Bremen) and Pitt Michelmann (University of Bremen) and Dirk Wenig (University of Bremen) and Rainer Malaka (University of Bremen) and Jan David Smeddinck (Newcastle University)},
url = {https://www.youtube.com/watch?v=T32Sop_LFu0&feature=youtu.be, Video
https://www.twitter.com/dmlabbremen, Twitter},
doi = {10.1145/3313831.3376260},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Questionnaires are among the most common research tools in virtual reality (VR) user studies. Transitioning from virtuality to reality for giving self-reports on VR experiences can lead to systematic biases. VR allows embedding questionnaires into the virtual environment, which may ease participation and avoid biases. To provide a cohesive picture of methods and design choices for questionnaires in VR (inVRQs), we discuss 15 inVRQ studies from the literature and present a survey with 67 VR experts from academia and industry. Based on the outcomes, we conducted two user studies in which we tested different presentation and interaction methods of inVRQs and evaluated the usability and practicality of our design. We observed comparable completion times between inVRQs and questionnaires outside VR (outVRQs) with higher enjoyment but lower usability for inVRQs. These findings advocate the application of inVRQs and provide an overview of methods and considerations that lay the groundwork for inVRQ design.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Exploring Human-Robot Interaction with the Elderly: Results from a Ten-Week Case Study in a Care Home
Felix Carros (Uni Siegen), Johanna Meurer (Uni Siegen), Diana Löffler (Uni Siegen), David Unbehaun (Uni Siegen), Sarah Matthies (Uni Siegen), Inga Koch (Uni Siegen), Rainer Wieching (Uni Siegen), Dave Randall (Uni Siegen), Marc Hassenzahl (Uni Siegen), Volker Wulf (Uni Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{CarrosExploring,
title = {Exploring Human-Robot Interaction with the Elderly: Results from a Ten-Week Case Study in a Care Home},
author = {Felix Carros (Uni Siegen) and Johanna Meurer (Uni Siegen) and Diana Löffler (Uni Siegen) and David Unbehaun (Uni Siegen) and Sarah Matthies (Uni Siegen) and Inga Koch (Uni Siegen) and Rainer Wieching (Uni Siegen) and Dave Randall (Uni Siegen) and Marc Hassenzahl (Uni Siegen) and Volker Wulf (Uni Siegen)},
doi = {10.1145/3313831.3376402},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Exploring Potentially Abusive Ethical, Social and Political Implications of Mixed Reality Research in HCI
Jan Gugenheimer (Institut Polytechnique de Paris, LTCI/Télécom Paris / Ulm University), Mark McGill (University of Glasgow), Samuel Huron (Institut Polytechnique de Paris, I3/Télécom Paris), Christian Mai (LMU Munich), Julie Williamson (University of Glasgow), Michael Nebeling (University of Michigan)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{GugenheimerExploring,
title = {Exploring Potentially Abusive Ethical, Social and Political Implications of Mixed Reality Research in HCI},
author = {Jan Gugenheimer (Institut Polytechnique de Paris, LTCI/Télécom Paris / Ulm University) and Mark McGill (University of Glasgow) and Samuel Huron (Institut Polytechnique de Paris, I3/Télécom Paris) and Christian Mai (LMU Munich) and Julie Williamson (University of Glasgow) and Michael Nebeling (University of Michigan)},
doi = {10.1145/3334480.3375180},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {In recent years, Mixed Reality (MR) headsets have increasingly made advances in terms of capability, affordability and end-user adoption, slowly becoming everyday technology. HCI research typically explores positive aspects of these technologies, focusing on interaction, presence and immersive experiences. However, such technological advances and paradigm shifts often fail to consider the “dark patterns”, with potential abusive scenarios, made possible by new technologies (cf. smartphone addiction, social media anxiety disorder). While these topics are getting recent attention in related fields and with the general population, this workshop is aimed at starting an active exploration of abusive, ethical, social and political scenarios of MR research inside the HCI community. With an HCI lens, workshop participants will engage in critical reviews of emerging MR technologies and applications and develop a joint research agenda to address them.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
FaceHaptics: Robot Arm based Versatile Facial Haptics for Immersive Environments
Alexander Wilberz (Hochschule Bonn-Rhein-Sieg), Dominik Leschtschow (Hochschule Bonn-Rhein-Sieg), Christina Trepkowski (Hochschule Bonn-Rhein-Sieg), Jens Maiero (Hochschule Bonn-Rhein-Sieg), Ernst Kruijff (Hochschule Bonn-Rhein-Sieg), Bernhard Riecke (Simon Fraser University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{WilberzFaceHaptics,
title = {FaceHaptics: Robot Arm based Versatile Facial Haptics for Immersive Environments},
author = {Alexander Wilberz (Hochschule Bonn-Rhein-Sieg) and Dominik Leschtschow (Hochschule Bonn-Rhein-Sieg) and Christina Trepkowski (Hochschule Bonn-Rhein-Sieg) and Jens Maiero (Hochschule Bonn-Rhein-Sieg) and Ernst Kruijff (Hochschule Bonn-Rhein-Sieg) and Bernhard Riecke (Simon Fraser University)},
doi = {10.1145/3313831.3376481},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Fairness and Decision-making in Collaborative Shift Scheduling Systems
Alarith Uhde (Uni Siegen), Nadine Schlicker (Ergosign GmbH), Dieter P. Wallach (Ergosign GmbH), Marc Hassenzahl (Uni Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{UhdeFairness,
title = {Fairness and Decision-making in Collaborative Shift Scheduling Systems},
author = {Alarith Uhde (Uni Siegen) and Nadine Schlicker (Ergosign GmbH) and Dieter P. Wallach (Ergosign GmbH) and Marc Hassenzahl (Uni Siegen)},
doi = {10.1145/3313831.3376656},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The strains associated with shift work decrease healthcare workers' well-being. However, shift schedules adapted to their individual needs can partially mitigate these problems. From a computing perspective, shift scheduling was so far mainly treated as an optimization problem with little attention given to the preferences, thoughts, and feelings of the healthcare workers involved. In the present study, we explore fairness as a central, human-oriented attribute of shift schedules as well as the scheduling process. Three in-depth qualitative interviews and a validating vignette study revealed that while on an abstract level healthcare workers agree on equality as the guiding norm for a fair schedule, specific scheduling conflicts should foremost be resolved by negotiating the importance of individual needs. We discuss elements of organizational fairness, including transparency and team spirit. Finally, we present a sketch for fair scheduling systems, summarizing key findings for designers in a readily usable way.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Feminist Living Labs as Research Infrastructures for HCI: The Case of a Video Game Company
Michael Ahmadi (University of Siegen), Rebecca Eilert (University of Siegen), Anne Weibert (University of Siegen), Volker Wulf (University of Siegen), Nicola Marsden (Heilbronn University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{AhmadiFeminist,
title = {Feminist Living Labs as Research Infrastructures for HCI: The Case of a Video Game Company},
author = {Michael Ahmadi (University of Siegen) and Rebecca Eilert (University of Siegen) and Anne Weibert (University of Siegen) and Volker Wulf (University of Siegen) and Nicola Marsden (Heilbronn University)},
doi = {10.1145/3313831.3376716},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The number of women in IT is still low and companies struggle to integrate female professionals. The aim of our research is to provide methodological support for understanding and sharing experiences of gendered practices in the IT industry and encouraging sustained reflection about these matters over time. We established a Living Lab with that end in view, aiming to enhance female participation in the IT workforce and committing ourselves to a participatory approach to the sharing of women’s experiences. Here, using the case of a German video game company which participated in our Lab, we detail our lessons learned. We show that this kind of long-term participation involves challenges over the lifetime of the project but can lead to substantial benefits for organizations. Our findings demonstrate that Living Labs are suitable for giving voice to marginalized groups, addressing their concerns and evoking change possibilities. Nevertheless, uncertainties about long-term sustainability remain.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Finding the Inner Clock: A Chronobiology-based Calendar
Sarah Janböcke (University of Siegen), Alina Gawlitta (University of Siegen), Judith Dörrenbächer (University of Siegen), Marc Hassenzahl (University of Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{JanböckeFinding,
title = {Finding the Inner Clock: A Chronobiology-based Calendar},
author = {Sarah Janböcke (University of Siegen) and Alina Gawlitta (University of Siegen) and Judith Dörrenbächer (University of Siegen) and Marc Hassenzahl (University of Siegen)},
doi = {10.1145/3334480.3382830},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
From Intentions to Successful Action: Supporting the Creation and Realization of Implementation Intentions
Toufique Bharmal (Uni Siegen), Marc Hassenzahl (Uni Siegen), Matthias Laschke (Uni Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{BharmalFrom,
title = {From Intentions to Successful Action: Supporting the Creation and Realization of Implementation Intentions},
author = {Toufique Bharmal (Uni Siegen) and Marc Hassenzahl (Uni Siegen) and Matthias Laschke (Uni Siegen)},
doi = {10.1145/3334480.3383018},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {"Good" intentions, such as to exercise more, only rarely spur action. In contrast, so-called "implementation intentions" explicitly relate goal-directed behavior to particular situations (e.g., when, where, and how). Studies show that this has a positive effect on goal achievement. This paper explores whether technology can support the transformation of "good" intentions into concrete implementation intentions and their triggering as well as routinization. Specifically, we report three single case studies with a functional prototype. This prototype supported creating implementation intentions, putting them into a calendar, and being reminded through an object representative of the planned activity. Through the prototype, all three participants engaged more in the activities chosen to fulfill the intention. All in all, the notion of supporting individual implementation intentions through technology seems a viable strategy to support behavior change.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Gaze’N’Touch: Enhancing Text Selection on Mobile Devices Using Gaze
Radiah Rivu (University of Bundeswehr Munich), Yasmeen Abdrabou (University of Bundeswehr Munich), Ken Pfeuffer (University of Bundeswehr Munich), Mariam Hassib (University of Bundeswehr Munich), Florian Alt (University of Bundeswehr Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{RivuGaze,
title = {Gaze’N’Touch: Enhancing Text Selection on Mobile Devices Using Gaze},
author = {Radiah Rivu (University of Bundeswehr Munich) and Yasmeen Abdrabou (University of Bundeswehr Munich) and Ken Pfeuffer (University of Bundeswehr Munich) and Mariam Hassib (University of Bundeswehr Munich) and Florian Alt (University of Bundeswehr Munich)},
doi = {10.1145/3334480.3382802},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Text selection is a frequent task we perform every day to edit, modify or delete text. Selecting a word requires not only precision but also switching between selection and typing, which influences both speed and error rates. We evaluate a novel concept that extends text editing with an additional modality: gaze. We present a user study (N=16) where we explore how the novel concept, referred to as GazeButton, can improve text selection and compare it to touch-based selection. We also tested the effect of text size on the selection techniques by comparing two different text sizes. Results show that gaze-based selection was faster with the larger text size, although the difference was not statistically significant. Qualitative feedback shows a preference for gaze over touch, motivating a new direction of gaze usage in text editors.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
GazeConduits: Calibration-Free Cross-Device Collaboration through Gaze and Touch
Simon Voelker (RWTH), Sebastian Hueber (RWTH), Christian Holz (ETH Zurich), Christian Remy (Aarhus University), Nicolai Marquardt (University College London)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkerGaze,
title = {GazeConduits: Calibration-Free Cross-Device Collaboration through Gaze and Touch},
author = {Simon Voelker (RWTH) and Sebastian Hueber (RWTH) and Christian Holz (ETH Zurich) and Christian Remy (Aarhus University) and Nicolai Marquardt (University College London)},
url = {https://youtu.be/Q59SQi0JUkg, Video},
doi = {10.1145/3313831.3376578},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present GazeConduits, a calibration-free ad-hoc mobile device setup that enables users to collaboratively interact with tablets, other users, and content in a cross-device setting using gaze and touch input. GazeConduits leverages recently presented phone capabilities to detect facial features and estimate users’ gaze directions. To join a collaborative setting, users place one or more tablets onto a shared table and position their phone in the center, which then tracks present users as well as their gaze direction to predict the tablets they look at. Using GazeConduits, we demonstrate a series of techniques for collaborative interaction across mobile devices for content selection and manipulation. Our evaluation with 20 simultaneous tablets on a table showed that GazeConduits can reliably identify at which tablet or at which collaborator a user is looking, enabling a rich set of interaction techniques.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
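The abstract above states that the central phone predicts which tablet a user is looking at but does not spell out the rule. Purely as an assumed illustration, not the authors' method: one simple geometric rule is to pick the tablet whose direction from the user's head is most aligned with the estimated gaze vector.

import numpy as np

def looked_at_tablet(head_pos, gaze_dir, tablet_positions):
    # Return the index of the tablet whose direction from the head is most
    # aligned with the gaze direction (largest cosine similarity).
    # Positions are 3-D points in a shared table frame; all of this is illustrative.
    gaze = np.asarray(gaze_dir, dtype=float)
    gaze = gaze / np.linalg.norm(gaze)
    best_idx, best_cos = -1, -1.0
    for i, pos in enumerate(tablet_positions):
        to_tablet = np.asarray(pos, dtype=float) - np.asarray(head_pos, dtype=float)
        to_tablet = to_tablet / np.linalg.norm(to_tablet)
        cos = float(np.dot(gaze, to_tablet))
        if cos > best_cos:
            best_idx, best_cos = i, cos
    return best_idx

# Example: two tablets on a table, gaze pointing roughly toward the second one.
print(looked_at_tablet([0, 0, 0.4], [0.5, -0.2, -0.8],
                       [[-0.3, 0.4, 0.0], [0.3, -0.1, 0.0]]))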
Getting out of Out of Sight: Evaluation of AR Mechanisms for Awareness and Orientation Support in Occluded Multi-Room Settings
Niklas Osmers (TU Clausthal), Michael Prilla (TU Clausthal)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{OsmersGetting,
title = {Getting out of Out of Sight: Evaluation of AR Mechanisms for Awareness and Orientation Support in Occluded Multi-Room Settings},
author = {Niklas Osmers (TU Clausthal) and Michael Prilla (TU Clausthal)},
url = {https://www.twitter.com/HCISGroup, Twitter},
doi = {10.1145/3313831.3376742},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Augmented Reality can provide orientation and awareness in situations in which objects or people are occluded by physical structures. This is relevant for many situations in the workplace, where objects are scattered across rooms and people are out of sight. While several AR mechanisms have been proposed to provide awareness and orientation in these situations, little is known about their effect on people's performance when searching for objects and coordinating with each other. In this paper, we compare three AR based mechanisms (map, x-ray, compass) according to their utility, usability, social presence, task load and users’ preferences. 48 participants had to work together in groups of four to find people and objects located around different rooms. Results show that map and x-ray performed best but provided least social presence among participants. We discuss these and other observations as well as potential impacts on designing AR awareness and orientation support.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Guess the Data: Data Work to Understand How People Make Sense of and Use Simple Sensor Data from Homes
Albrecht Kurze (Chemnitz University of Technology), Andreas Bischof (Chemnitz University of Technology), Sören Totzauer (Chemnitz University of Technology), Michael Storz (Chemnitz University of Technology), Maximilian Eibl (Chemnitz University of Technology), Margot Brereton (Queensland University of Technology), Arne Berger (Anhalt University of Applied Sciences)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KurzeGuess,
title = {Guess the Data: Data Work to Understand How People Make Sense of and Use Simple Sensor Data from Homes},
author = {Albrecht Kurze (Chemnitz University of Technology) and Andreas Bischof (Chemnitz University of Technology) and Sören Totzauer (Chemnitz University of Technology) and Michael Storz (Chemnitz University of Technology) and Maximilian Eibl (Chemnitz University of Technology) and Margot Brereton (Queensland University of Technology) and Arne Berger (Anhalt University of Applied Sciences)},
url = {https://www.twitter.com/arneberger, Twitter},
doi = {10.1145/3313831.3376273},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Simple smart home sensors, e.g. for temperature or light, increasingly collect seemingly inconspicuous data. Prior work has shown that human sensemaking of such sensor data can reveal domestic activities. Such sensemaking presents an opportunity to empower people to understand the implications of simple smart home sensors. To investigate, we developed and field-tested the Guess the Data method, which enabled people to use and make sense of live data from their homes and to collectively interpret and reflect on anonymized data from the homes in our study. Our findings show how participants reconstruct behavior, both individually and collectively, expose the sensitive personal data of others, and use sensor data as evidence and for lateral surveillance within the household. We discuss the potential of our method as a participatory HCI method for investigating design of the IoT and implications created by doing data work on home sensors.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
HCI Across Borders and Sustainable Development Goals
Neha Kumar (Georgia Institute of Technology), Vikram Kamath Cannanure (Carnegie Mellon University), Dilrukshi Gamage (University of Moratuwa), Annu Sible Prabhakar (University of Cincinnati), Christian Sturm (Hamm-Lippstadt University of Applied Sciences), Cuauhtémoc Rivera Loaiza (Universidad Michoacana), Dina Sable (University of Toronto), Md. Moinuddin Bhuiyan (Grameenphone Ltd.), Mario A. Moreno Rocha (University of St Andrews)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{KumarAcross,
title = {HCI Across Borders and Sustainable Development Goals},
author = {Neha Kumar (Georgia Institute of Technology) and Vikram Kamath Cannanure (Carnegie Mellon University) and Dilrukshi Gamage (University of Moratuwa) and Annu Sible Prabhakar (University of Cincinnati) and Christian Sturm (Hamm-Lippstadt University of Applied Sciences) and Cuauhtémoc Rivera Loaiza (Universidad Michoacana) and Dina Sable (University of Toronto) and Md. Moinuddin Bhuiyan (Grameenphone Ltd.) and Mario A. Moreno Rocha (University of St Andrews)},
doi = {10.1145/3334480.3375067},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {As HCI Across Borders aspires to celebrate its fifth year at CHI, and the CHI 2020 venue of Hawaii signifies a coming together of four continents, the goal of the 2020 symposium is to bring our focus to themes that unify and foster solidarity across borders. Thus we select the United Nations' Sustainable Development Goals as our object of study. Many communities within CHI focus on the constrained and ephemeral nature of resources, including the HCI for Development (HCI4D), Sustainable HCI (SHCI), and Crisis Informatics (CI) communities, among several others. We contend that it is time for these communities to come together in addressing issues of global relevance and impact, and for many more to care. Additionally, as the venue for CHI shifts to Asia in 2021, we aspire to prepare the conference and its participants to grapple with themes that might offer a different and novel perspective when engaged within the Global South.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
HeadReach: Using Head Tracking to Increase Reachability on Mobile Touch Devices
Simon Voelker (RWTH), Sebastian Hueber (RWTH), Christian Corsten (RWTH), Christian Remy (Aarhus University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkerHeadReach,
title = {HeadReach: Using Head Tracking to Increase Reachability on Mobile Touch Devices},
author = {Simon Voelker (RWTH) and Sebastian Hueber (RWTH) and Christian Corsten (RWTH) and Christian Remy (Aarhus University)},
url = {https://youtu.be/IyVp5VFde2w, Video},
doi = {10.1145/3313831.3376868},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {People often operate their smartphones with only one hand, using just their thumb for touch input. With today’s larger smartphones, this leads to a reachability issue: Users can no longer comfortably touch everywhere on the screen without changing their grip. We investigate using the head tracking in modern smartphones to address this reachability issue. We developed three interaction techniques, pure head (PH), head + touch (HT), and head area + touch (HA), to select targets beyond the reach of one’s thumb. In two user studies, we found that selecting targets using HT and HA had higher success rates than the default direct touch (DT) while standing (by about 9%) and walking (by about 12%), while being moderately slower. HT and HA were also faster than one of the best techniques, BezelCursor (BC) (by about 20% while standing and 6% while walking), while having the same success rate.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Heartbeats in the Wild: A Field Study Exploring ECG Biometrics in Everyday Life
Florian Lehmann (LMU Munich / University of Bayreuth), Daniel Buschek (University of Bayreuth)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{LehmannHeartbeats,
title = {Heartbeats in the Wild: A Field Study Exploring ECG Biometrics in Everyday Life},
author = {Florian Lehmann (LMU Munich / University of Bayreuth) and Daniel Buschek (University of Bayreuth)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376536},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {This paper reports on an in-depth study of electrocardiogram (ECG) biometrics in everyday life. We collected ECG data from 20 people over a week, using a non-medical chest tracker. We evaluated user identification accuracy in several scenarios and observed equal error rates of 9.15% to 21.91%, heavily depending on 1) the number of days used for training, and 2) the number of heartbeats used per identification decision. We conclude that ECG biometrics can work in the wild but are less robust than expected based on the literature, highlighting that previous lab studies obtained highly optimistic results with regard to real life deployments. We explain this with noise due to changing body postures and states as well as interrupted measures. We conclude with implications for future research and the design of ECG biometrics systems for real world deployments, including critical reflections on privacy.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
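For context on the equal error rates reported above: the EER is the operating point at which the false accept rate and the false reject rate coincide. The sketch below shows a generic way to approximate it from genuine and impostor similarity scores; it is not the authors' evaluation code, and the synthetic scores are made up.

import numpy as np

def equal_error_rate(genuine_scores, impostor_scores):
    # Sweep a decision threshold over all observed scores and return the point
    # where the false accept rate (FAR) and false reject rate (FRR) are closest.
    # Higher scores are assumed to mean "more likely the same person".
    genuine = np.asarray(genuine_scores, dtype=float)
    impostor = np.asarray(impostor_scores, dtype=float)
    best_far, best_frr, best_gap = 1.0, 0.0, float("inf")
    for t in np.sort(np.concatenate([genuine, impostor])):
        frr = float(np.mean(genuine < t))    # genuine attempts rejected
        far = float(np.mean(impostor >= t))  # impostor attempts accepted
        if abs(far - frr) < best_gap:
            best_far, best_frr, best_gap = far, frr, abs(far - frr)
    return (best_far + best_frr) / 2

# Toy example with synthetic, well-separated similarity scores.
rng = np.random.default_rng(1)
print(equal_error_rate(rng.normal(0.7, 0.1, 500), rng.normal(0.4, 0.1, 500)))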
Heatmaps, Shadows, Bubbles, Rays: Comparing Mid-Air Pen Position Visualizations in Handheld AR
Philipp Wacker (RWTH), Adrian Wagner (RWTH), Simon Voelker (RWTH), Jan Borchers (RWTH)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{WackerHeatmaps,
title = {Heatmaps, Shadows, Bubbles, Rays: Comparing Mid-Air Pen Position Visualizations in Handheld AR},
author = {Philipp Wacker (RWTH) and Adrian Wagner (RWTH) and Simon Voelker (RWTH) and Jan Borchers (RWTH)},
url = {https://youtu.be/sFPP2xeAEP8, Video},
doi = {10.1145/3313831.3376848},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {In Handheld Augmented Reality, users look at AR scenes through the smartphone held in their hand. In this setting, having a mid-air pointing device like a pen in the other hand greatly expands the interaction possibilities. For example, it lets users create 3D sketches and models while on the go. However, perceptual issues in Handheld AR make it difficult to judge the distance of a virtual object, making it hard to align a pen to it. To address this, we designed and compared different visualizations of the pen's position in its virtual environment, measuring pointing precision, task time, activation patterns, and subjective ratings of helpfulness, confidence, and comprehensibility of each visualization. While all visualizations resulted in only minor differences in precision and task time, subjective ratings of perceived helpfulness and confidence favor a `heatmap' technique that colors the objects in the scene based on their distance to the pen.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
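The 'heatmap' visualization that participants rated most helpful colors each object by its distance to the pen tip. The mapping below is a minimal illustrative sketch; the paper's actual color scheme and distance range are not given here, so the linear red-to-blue ramp and the 0.3 m range are assumptions.

def heat_color(distance_m, max_distance_m=0.3):
    # Map a pen-to-object distance in metres to an (R, G, B) colour in [0, 1]:
    # red when the pen touches the object, fading to blue at max_distance_m and beyond.
    t = min(max(distance_m / max_distance_m, 0.0), 1.0)  # 0 = close, 1 = far
    return (1.0 - t, 0.0, t)

# Example: an object 5 cm from the pen tip appears mostly red.
print(heat_color(0.05))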
HiveFive: Immersion Preserving Attention Guidance in Virtual Reality
Daniel Lange (University of Oldenburg), Tim Claudius Stratmann (OFFIS - Institute for IT), Uwe Gruenefeld (OFFIS - Institute for IT), Susanne Boll (University of Oldenburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{LangeHiveFive,
title = {HiveFive: Immersion Preserving Attention Guidance in Virtual Reality},
author = {Daniel Lange (University of Oldenburg) and Tim Claudius Stratmann (OFFIS - Institute for IT) and Uwe Gruenefeld (OFFIS - Institute for IT) and Susanne Boll (University of Oldenburg)},
url = {https://youtu.be/df_onXBj7cM, Video},
doi = {10.1145/3313831.3376803},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Recent advances in Virtual Reality (VR) technology, such as larger fields of view, have made VR increasingly immersive. However, a larger field of view often results in a user focusing on certain directions and missing relevant content presented elsewhere on the screen. With HiveFive, we propose a technique that uses swarm motion to guide user attention in VR. The goal is to seamlessly integrate directional cues into the scene without losing immersiveness. We evaluate HiveFive in two studies. First, we compare biological motion (from a prerecorded swarm) with non-biological motion (from an algorithm), finding further evidence that humans can distinguish between these motion types and that, contrary to our hypothesis, non-biological swarm motion results in significantly faster response times. Second, we compare HiveFive to four other techniques and show that it not only results in fast response times but also has the smallest negative effect on immersion.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
How to Trick AI: Users’ Strategies for Protecting Themselves From Automatic Personality Assessment
Sarah Theres Völkel (LMU Munich), Renate Häuslschmid (Madeira Interactive Technologies Institute), Anna Werner (LMU Munich), Heinrich Hussmann (LMU Munich), Andreas Butz (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkelHow,
title = {How to Trick AI: Users’ Strategies for Protecting Themselves From Automatic Personality Assessment},
author = {Sarah Theres Völkel (LMU Munich) and Renate Häuslschmid (Madeira Interactive Technologies Institute) and Anna Werner (LMU Munich) and Heinrich Hussmann (LMU Munich) and Andreas Butz (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376877},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Psychological targeting tries to influence and manipulate users' behaviour. We investigated whether users can protect themselves from being profiled by a chatbot, which automatically assesses users' personality. Participants interacted twice with the chatbot: (1) They chatted for 45 minutes in customer service scenarios and received their actual profile (baseline). (2) They then were asked to repeat the interaction and to disguise their personality by strategically tricking the chatbot into calculating a falsified profile. In interviews, participants mentioned 41 different strategies but could only apply a subset of them in the interaction. They were able to manipulate all Big Five personality dimensions by nearly 10%. Participants regarded personality as very sensitive data. As they found tricking the AI too exhaustive for everyday use, we reflect on opportunities for privacy protective designs in the context of personality-aware systems.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Hybridity as Design Strategy for Service Robots to Become Domestic Products
Diana Löffler (USI), Judith Dörrenbächer (USI), Julika Welge (USI), Marc Hassenzahl (USI)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{LoefflerHybridity,
title = {Hybridity as Design Strategy for Service Robots to Become Domestic Products},
author = {Diana Löffler (USI) and Judith Dörrenbächer (USI) and Julika Welge (USI) and Marc Hassenzahl (USI)},
doi = {10.1145/3334480.3382832},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Service robots have yet to occupy niches in domestic environments beyond vacuuming and lawn mowing. This can only happen by providing an alternative experience rather than mimicking and competing with humans, pets and already available appliances. We argue that the robot’s hybrid nature on a spectrum between ‘thing’ and ‘being’ is a suitable framework to create such a unique experience. More specifically, we argue that hybridity creates ambiguity which affords a wider range of practices to occur, from tool use to social interaction. We evaluate this idea in an online study with 61 participants who rated three robots with rather biomorphic (Pepper), hybrid (Sympartner) or device-like (Relay) appearance. Results show that the hybrid design affords a broader spectrum of use compared to designs inscribing to treat the robot as either possession or social companion. The study offers initial insights on ambiguity-through-hybridity as design strategy to help robots become domestic products.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
I Wish You Were Smart(er): Investigating Users' Desires and Needs Towards Home Appliances
Sarah Prange (University of Bundeswehr Munich / LMU Munich), Florian Alt (University of Bundeswehr Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{PrangeWish,
title = {I Wish You Were Smart(er): Investigating Users' Desires and Needs Towards Home Appliances},
author = {Sarah Prange (University of Bundeswehr Munich / LMU Munich) and Florian Alt (University of Bundeswehr Munich)},
doi = {10.1145/3334480.3382910},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {In this work, we present findings from an online survey (N=77) in which we assessed situations of users wishing for features or devices in their home to be smart(er). Our work is motivated by the fact that on one hand, several successful smart devices and features found their way into users’ homes (e.g., smart TVs, smart assistants, smart toothbrushes). On the other hand, a more holistic understanding of when and why users would like devices and features to be smart is missing as of today. Such knowledge is valuable for researchers and practitioners to inform the design of future smart home devices and features, in particular with regards to interaction techniques, privacy mechanisms, and, ultimately, acceptance and uptake. We found that users would appreciate smart features for various use cases, including remote control and multi-tasking, and are willing to share devices. We believe our work to be useful for designers and HCI researchers by supporting the design and evaluation of future smart devices.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Improving Humans' Ability to Interpret Deictic Gestures in Virtual Reality
Sven Mayer (Carnegie Mellon University / University of Stuttgart), Jens Reinhardt (Hamburg University of Applied Sciences), Robin Schweigert (University of Stuttgart), Brighten Jelke (Macalester College), Valentin Schwind (University of Stuttgart / University of Regensburg), Katrin Wolf (Hamburg University of Applied Sciences), Niels Henze (University of Regensburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MayerImproving,
title = {Improving Humans' Ability to Interpret Deictic Gestures in Virtual Reality},
author = {Sven Mayer (Carnegie Mellon University / University of Stuttgart) and Jens Reinhardt (Hamburg University of Applied Sciences) and Robin Schweigert (University of Stuttgart) and Brighten Jelke (Macalester College) and Valentin Schwind (University of Stuttgart / University of Regensburg) and Katrin Wolf (Hamburg University of Applied Sciences) and Niels Henze (University of Regensburg)},
url = {https://www.youtube.com/watch?v=Afi4TPzHdlM, Youtube},
doi = {10.1145/3313831.3376340},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Collaborative Virtual Environments (CVEs) offer unique opportunities for human communication. Humans can interact with each other over a distance in any environment and visual embodiment they want. Although deictic gestures are especially important as they can guide other humans' attention, humans make systematic errors when using and interpreting them. Recent work suggests that the interpretation of vertical deictic gestures can be significantly improved by warping the pointing arm. In this paper, we extend previous work by showing that models enable to also improve the interpretation of deictic gestures at targets all around the user. Through a study with 28 participants in a CVE, we analyzed the errors users make when interpreting deictic gestures. We derived a model that rotates the arm of a pointing user's avatar to improve the observing users' accuracy. A second study with 24 participants shows that we can improve observers' accuracy by 22.9%. As our approach is not noticeable for users, it improves their accuracy without requiring them to learn a new interaction technique or distracting from the experience.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Improving the Usability and UX of the Swiss Internet Voting Interface
Karola Marky (TU Darmstadt), Verena Zimmermann (TU Darmstadt), Markus Funk (Cerence GmbH), Jörg Daubert (TU Darmstadt), Kira Bleck (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MarkyImproving,
title = {Improving the Usability and UX of the Swiss Internet Voting Interface},
author = {Karola Marky (TU Darmstadt) and Verena Zimmermann (TU Darmstadt) and Markus Funk (Cerence GmbH) and Jörg Daubert (TU Darmstadt) and Kira Bleck (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
url = {https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3313831.3376769},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Up to 20% of residential votes and up to 70% of absentee votes in Switzerland are cast online. The Swiss scheme aims to provide individual verifiability by different verification codes. The voters have to carry out verification on their own, making the usability and UX of the interface of great importance. To improve the usability, we first performed an evaluation with 12 human-computer interaction experts to uncover usability weaknesses of the Swiss Internet voting interface. Based on the experts' findings, related work, and an exploratory user study with 36 participants, we propose a redesign that we evaluated in a user study with 49 participants. Our study confirmed that the redesign indeed improves the detection of incorrect votes by 33% and increases the trust and understanding of the voters. Our studies furthermore contribute important recommendations for designing verifiable e-voting systems in general.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Improving User Experience of Eye Tracking-Based Interaction: Introspecting and Adapting Interfaces
Raphael Menges (University of Koblenz), Chandan Kumar (University of Koblenz), Steffen Staab (University of Koblenz, University of Stuttgart)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Other | Links:
@inproceedings{MengesImproving,
title = {Improving User Experience of Eye Tracking-Based Interaction: Introspecting and Adapting Interfaces},
author = {Raphael Menges (University of Koblenz) and Chandan Kumar (University of Koblenz) and Steffen Staab (University of Koblenz, University of Stuttgart)},
url = {https://www.twitter.com/AnalyticComp, Twitter},
doi = {10.1145/3338844},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Eye tracking systems have greatly improved in recent years, being a viable and affordable option as digital communication channel, especially for people lacking fine motor skills. Using eye tracking as an input method is challenging due to accuracy and ambiguity issues, and therefore research in eye gaze interaction is mainly focused on better pointing and typing methods. However, these methods eventually need to be assimilated to enable users to control application interfaces. A common approach to employ eye tracking for controlling application interfaces is to emulate mouse and keyboard functionality. We argue that the emulation approach incurs unnecessary interaction and visual overhead for users, aggravating the entire experience of gaze-based computer access. We discuss how the knowledge about the interface semantics can help reducing the interaction and visual overhead to improve the user experience. Thus, we propose the efficient introspection of interfaces to retrieve the interface semantics and adapt the interaction with eye gaze. We have developed a Web browser, GazeTheWeb, that introspects Web page interfaces and adapts both the browser interface and the interaction elements on Web pages for gaze input. In a summative lab study with 20 participants, GazeTheWeb allowed the participants to accomplish information search and browsing tasks significantly faster than an emulation approach. Additional feasibility tests of GazeTheWeb in lab and home environment showcase its effectiveness in accomplishing daily Web browsing activities and adapting large variety of modern Web pages to suffice the interaction for people with motor impairment.},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}
Improving Worker Engagement Through Conversational Microtask Crowdsourcing
Sihang Qiu (Delft University of Technology), Ujwal Gadiraju (Leibniz Universität Hannover), Alessandro Bozzon (Delft University of Technology)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{QiuImproving,
title = {Improving Worker Engagement Through Conversational Microtask Crowdsourcing},
author = {Sihang Qiu (Delft University of Technology) and Ujwal Gadiraju (Leibniz Universität Hannover) and Alessandro Bozzon (Delft University of Technology)},
doi = {10.1145/3313831.3376403},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Improvising with Machines – Designing Artistic Non-Human Actors
Matthias Laschke (Uni Siegen), Robin Neuhaus (Uni Siegen), Marc Hassenzahl (Uni Siegen), Claudius Lazzeroni (Folkwang University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{LaschkeImprovising,
title = {Improvising with Machines – Designing Artistic Non-Human Actors},
author = {Matthias Laschke (Uni Siegen) and Robin Neuhaus (Uni Siegen) and Marc Hassenzahl (Uni Siegen) and Claudius Lazzeroni (Folkwang University)},
doi = {10.1145/3334480.3382825},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {For musicians, improvising with other musicians is not uncommon. But what happens when musicians engage in musical improvisation with semi-autonomous machines? We investigated a seminar in which design students built machines for musicians to improvise with. We explored the experiences of musicians when improvising with non-human musicians, as well as the challenges of designing non-human musicians. Among other things, we found that, while the machines appeared from an outside perspective as independent actors interacting with the musicians, the musicians experienced them as additional instruments they controlled. The interaction design of non-human actors was challenging for designers.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
In-game and Out-of-game Social Anxiety Influences Player Motivations, Activities, and Experiences in MMORPGs
Martin Dechant (University of Saskatchewan), Susanne Poeller (University of Trier), Colby Johanson (University of Saskatchewan), Katelyn Wiley (University of Saskatchewan), Regan Mandryk (University of Saskatchewan)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{DechantInOut,
title = {In-game and Out-of-game Social Anxiety Influences Player Motivations, Activities, and Experiences in MMORPGs},
author = {Martin Dechant (University of Saskatchewan) and Susanne Poeller (University of Trier) and Colby Johanson (University of Saskatchewan) and Katelyn Wiley (University of Saskatchewan) and Regan Mandryk (University of Saskatchewan)},
doi = {10.1145/3313831.3376734},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Initial Evaluation of Different Types of Virtual Reality Locomotion Towards a Pedestrian Simulator for Urban and Transportation Planning
Julian Kreimeier (Nuremberg Institute of Technology), Daniela Ullmann (Nuremberg Institute of Technology), Harald Kipke (Nuremberg Institute of Technology), Timo Götzelmann (Nuremberg Institute of Technology)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{KreimeierInitial,
title = {Initial Evaluation of Different Types of Virtual Reality Locomotion Towards a Pedestrian Simulator for Urban and Transportation Planning},
author = {Julian Kreimeier (Nuremberg Institute of Technology) and Daniela Ullmann (Nuremberg Institute of Technology) and Harald Kipke (Nuremberg Institute of Technology) and Timo Götzelmann (Nuremberg Institute of Technology)},
doi = {10.1145/3334480.3382958},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Interaction Techniques for Visual Exploration Using Embedded Word-Scale Visualizations
Pascal Goffin (University of Utah), Tanja Blascheck (University of Stuttgart), Petra Isenberg (Inria), Wesley Willett (University of Calgary)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{GoffinInteraction,
title = {Interaction Techniques for Visual Exploration Using Embedded Word-Scale Visualizations},
author = {Pascal Goffin (University of Utah) and Tanja Blascheck (University of Stuttgart) and Petra Isenberg (Inria) and Wesley Willett (University of Calgary)},
doi = {10.1145/3313831.3376842},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We describe a design space of view manipulation interactions for small data-driven contextual visualizations (word-scale visualizations). These interaction techniques support an active reading experience and engage readers through exploration of embedded visualizations whose placement and content connect them to specific terms in a document. A reader could, for example, use our proposed interaction techniques to explore word-scale visualizations of stock market trends for companies listed in a market overview article. When readers wish to engage more deeply with the data, they can collect, arrange, compare, and navigate the document using the embedded word-scale visualizations, permitting more visualization-centric analyses. We support our design space with a concrete implementation, illustrate it with examples from three application domains, and report results from two experiments. The experiments show how view manipulation interactions helped readers examine embedded visualizations more quickly and with less scrolling and yielded qualitative feedback on usability and future opportunities.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Introducing Functional Biometrics: Using Body-Reflections as a Novel Class of Biometric Authentication Systems
Jonathan Liebers (Universität Duisburg-Essen), Stefan Schneegass (Universität Duisburg-Essen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{LiebersIntroducing,
title = {Introducing Functional Biometrics: Using Body-Reflections as a Novel Class of Biometric Authentication Systems},
author = {Jonathan Liebers (Universität Duisburg-Essen) and Stefan Schneegass (Universität Duisburg-Essen)},
doi = {10.1145/3334480.3383059},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Human-computer authentication is a continuously important topic where, besides security, the aspects of usability must also be taken into consideration. Biometric authentication methods promise to fulfill both aspects to a high degree, yet they come with severe drawbacks, such as the lack of changeability of the utilized trait, in case it is leaked or stolen. To compensate for these disadvantages, we introduce a novel class of biometric authentication systems in this work, named Functional Biometrics. This approach regards the human body as a function that transforms a stimulus which is applied to the body by the authentication system. Both the stimulus and the measured body reflection form a pair that can subsequently be used for authentication, yet the underlying function remains secret. Following this approach, we intend to disprove some of the drawbacks of traditional biometrics.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Investigating User-Created Gamification in an Image Tagging Task
Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Dominic Buchheit (Saarland University), Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SchubhanInvestigating,
title = {Investigating User-Created Gamification in an Image Tagging Task},
author = {Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Dominic Buchheit (Saarland University) and Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
url = {https://www.youtube.com/watch?v=C_2RE_Tfzys, Video},
doi = {10.1145/3313831.3376360},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Commonly, gamification is designed by developers and not by end-users. In this paper we investigate an approach where users take control of this process. Firstly, users were asked to describe their own gamification concepts which would motivate them to put more effort into an image tagging task. We selected this task as gamification has already been shown to be effective here in previous work. Based on these descriptions, an implementation was made for each concept and given to the creator. In a between-subjects study (n=71), our approach was compared to a no-gamification condition and two conditions with fixed gamification settings. We found that providing participants with an implementation of their own concept significantly increased the amount of generated tags compared to the other conditions. Although the quality of tags was lower, the number of usable tags remained significantly higher in comparison, suggesting the usefulness of this approach.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
It's Not Always Better When We're Together: Effects of Being Accompanied in Virtual Reality
Rufat Rzayev (University of Regensburg), Florian Habler (University of Regensburg), Polina Ugnivenko (University of Regensburg), Niels Henze (University of Regensburg), Valentin Schwind (University of Regensburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{RzayevIt,
title = {It's Not Always Better When We're Together: Effects of Being Accompanied in Virtual Reality},
author = {Rufat Rzayev (University of Regensburg) and Florian Habler (University of Regensburg) and Polina Ugnivenko (University of Regensburg) and Niels Henze (University of Regensburg) and Valentin Schwind (University of Regensburg)},
doi = {10.1145/3334480.3382826},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual reality (VR) enables immersive applications that make rich content available independent of time and space. By replacing or supplementing physical face-to-face meetings, VR could also radically change how we socially interact with others. Despite this potential, the effect of transferring physical collaborative experience into a virtual one is unclear. Therefore, we investigated the experience differences between a collaborative virtual environment (CVE) and a physical environment. We used a museum visit as a task since it is a typical social experience and a promising use case for VR. 48 participants experienced the task in real and virtual environments, either alone or with a partner. Despite the potential of CVEs, we found that being in a virtual environment has adverse effects on the experience, which are reinforced by being in the environment with another person. Based on quantitative and qualitative results, we provide recommendations for the design of future multi-user virtual environments.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
JumpVR: Jump-Based Locomotion Augmentation for Virtual Reality
Dennis Wolf (Ulm University), Katja Rogers (Ulm University), Christoph Kunder (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{WolfJump,
title = {JumpVR: Jump-Based Locomotion Augmentation for Virtual Reality},
author = {Dennis Wolf (Ulm University) and Katja Rogers (Ulm University) and Christoph Kunder (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/JNWfs3-V1zQ, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376243},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {One of the great benefits of virtual reality (VR) is the implementation of features that go beyond realism. Common “unrealistic” locomotion techniques (like teleportation) can avoid spatial limitation of tracking but minimize potential benefits of more realistic techniques (e.g., walking). As an alternative that combines realistic physical movement with hyper-realistic virtual outcome, we present JumpVR, a jump-based locomotion augmentation technique that virtually scales users’ physical jumps. In a user study (N=28), we show that jumping in VR (regardless of scaling) can significantly increase presence, motivation and immersion compared to teleportation, while largely not increasing simulator sickness. Further, participants reported higher immersion and motivation for most scaled jumping variants than forward-jumping. Our work shows the feasibility and benefits of jumping in VR and explores suitable parameters for its hyper-realistic scaling. We discuss design implications for VR experiences and research.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Leveraging Error Correction in Voice-based Text Entry by Talk-and-Gaze
Korok Sengupta (University of Koblenz), Sabin Bhattarai (University of Koblenz), Sayan Sarcar (University of Tsukuba), Scott MacKenzie (York University), Steffen Staab (University of Stuttgart)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SenguptaLeveraging,
title = {Leveraging Error Correction in Voice-based Text Entry by Talk-and-Gaze},
author = {Korok Sengupta (University of Koblenz) and Sabin Bhattarai (University of Koblenz) and Sayan Sarcar (University of Tsukuba) and Scott MacKenzie (York University) and Steffen Staab (University of Stuttgart)},
url = {https://www.twitter.com/AnalyticComp, Twitter},
doi = {10.1145/3313831.3376579},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present the design and evaluation of Talk-and-Gaze (TaG), a method for selecting and correcting errors with voice and gaze. TaG uses eye gaze to overcome the inability of voice-only systems to provide spatial information. The user’s point of gaze is used to select an erroneous word either by dwelling on the word for 800 ms (D-TaG) or by uttering a “select” voice command (V-TaG). A user study with 12 participants compared D-TaG, V-TaG, and a voice-only method for selecting and correcting words. Corrections were performed more than 20% faster with D-TaG compared to the V-TaG or voice-only methods. As well, D-TaG was observed to require 24% less selection effort than V-TaG and 11% less selection effort than voice-only error correction. D-TaG was well received in a subjective assessment, with 66% of users choosing it as their preferred choice for error correction in voice-based text entry.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Levitation Simulator: Prototyping Ultrasonic Levitation Interfaces in Virtual Reality
Viktorija Paneva (University of Bayreuth), Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{PanevaLevitation,
title = {Levitation Simulator: Prototyping Ultrasonic Levitation Interfaces in Virtual Reality},
author = {Viktorija Paneva (University of Bayreuth) and Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
doi = {10.1145/3313831.3376409},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Listen to Developers! A Participatory Design Study on Security Warnings for Cryptographic APIs
Peter Gorski (TH Köln / University of Applied Sciences), Yasemin Acar (Leibniz University Hannover), Luigi Lo Iacono (TH Köln / University of Applied Sciences), Sascha Fahl (Leibniz University Hannover)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{GorskiListen,
title = {Listen to Developers! A Participatory Design Study on Security Warnings for Cryptographic APIs},
author = {Peter Gorski (TH Köln / University of Applied Sciences) and Yasemin Acar (Leibniz University Hannover) and Luigi Lo Iacono (TH Köln / University of Applied Sciences) and Sascha Fahl (Leibniz University Hannover)},
doi = {10.1145/3313831.3376142},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
MazeRunVR: An Open Benchmark for VR Locomotion Performance, Preference and Sickness in the Wild
Kirill Ragozin (Keio University), Karola Marky (TU Darmstadt), Kai Kunze (Keio University), Yun Suen Pai (University of Auckland)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{RagozinMaze,
title = {MazeRunVR: An Open Benchmark for VR Locomotion Performance, Preference and Sickness in the Wild},
author = {Kirill Ragozin (Keio University) and Karola Marky (TU Darmstadt) and Kai Kunze (Keio University) and Yun Suen Pai (University of Auckland)},
doi = {10.1145/3334480.3383035},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Locomotion in virtual reality (VR) is one of the biggest problems for large scale adoption of VR applications. Yet, to our knowledge, there are few studies conducted in-the-wild to understand performance metrics and general user preference for different mechanics. In this paper, we present the first steps towards an open framework to create a VR locomotion benchmark. As a viability study, we investigate how well users move in VR when using three different locomotion mechanics. The benchmark was played in over 124 sessions across 10 countries over a period of three weeks. The included prototype locomotion mechanics are arm swing, walk-in-place and trackpad movement. We found that overall, users performed significantly faster using arm swing and trackpad when compared to walk-in-place. For subjective preference, arm swing was significantly preferred over the other two methods. Finally, for induced sickness, walk-in-place was the most sickness-inducing locomotion method overall.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Meaningful Technology at Work – A Reflective Design Case of Improving Radiologists’ Wellbeing Through Medical Technology
Matthias Laschke (Uni Siegen), Christoph Braun (Siemens Healthineers), Robin Neuhaus (Uni Siegen), Marc Hassenzahl (Uni Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{LaschkeMeaningful,
title = {Meaningful Technology at Work – A Reflective Design Case of Improving Radiologists’ Wellbeing Through Medical Technology},
author = {Matthias Laschke (Uni Siegen) and Christoph Braun (Siemens Healthineers) and Robin Neuhaus (Uni Siegen) and Marc Hassenzahl (Uni Siegen)},
doi = {10.1145/3313831.3376710},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {This paper presents a real-world case with a large medical technology provider (MTP), showing that medical technology could be designed more holistically to explicitly improve radiologists' wellbeing. Despite all skepticism, our prototypical applications resonated well among the radiologists involved, the healthcare provider, and other customers of the MTP.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Mix&Match: Towards Omitting Modelling through In-Situ Alteration and Remixing of Model Repository Artifacts in Mixed Reality
Evgeny Stemasov (Ulm University), Tobias Wagner (Ulm University), Jan Gugenheimer (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{StemasovMix,
title = {Mix&Match: Towards Omitting Modelling through In-Situ Alteration and Remixing of Model Repository Artifacts in Mixed Reality},
author = {Evgeny Stemasov (Ulm University) and Tobias Wagner (Ulm University) and Jan Gugenheimer (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/Dyb0QRtNtag, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376839},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The accessibility of tools to model artifacts is one of the core driving factors for the adoption of Personal Fabrication. Subsequently, model repositories like Thingiverse became important tools in (novice) makers' processes. They allow them to shorten or even omit the design process, offloading a majority of the effort to other parties. However, steps like the measurement of surrounding constraints (e.g., clearance), which exist only inside the users' environment, cannot be similarly outsourced. We propose Mix&Match, a mixed-reality-based system which allows users to browse model repositories, preview the models in-situ, and adapt them to their environment in a simple and immediate fashion. Mix&Match aims to provide users with CSG operations which can be based on both virtual and real geometry. We present interaction patterns and scenarios for Mix&Match, arguing for the combination of mixed reality and model repositories. This enables almost modelling-free personal fabrication for both novices and expert makers.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Momentary Emotion Elicitation and Capture (MEEC)
Abdallah El Ali (Centrum Wiskunde & Informatica, Amsterdam), Monica Perusquía-Hernández (NTT Communication Science Laboratories Atsugi), Pete Denman (Intel Corp Portland), Yomna Abdelrahman (Bundeswehr University Munich), Mariam Hassib (Bundeswehr University Munich), Alexander Meschtscherjakov (University of Salzburg), Denzil Ferreira (University of Oulu), Niels Henze (University of Regensburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{AliMomentary,
title = {Momentary Emotion Elicitation and Capture (MEEC)},
author = {Abdallah El Ali (Centrum Wiskunde & Informatica, Amsterdam) and Monica Perusquía-Hernández (NTT Communication Science Laboratories Atsugi) and Pete Denman (Intel Corp Portland) and Yomna Abdelrahman (Bundeswehr University Munich) and Mariam Hassib (Bundeswehr University Munich) and Alexander Meschtscherjakov (University of Salzburg) and Denzil Ferreira (University of Oulu) and Niels Henze (University of Regensburg)},
url = {https://meec-ws.com/, Workshop Website},
doi = {10.1145/3334480.3375175},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Recognizing human emotions and responding appropriately has the potential to radically change the way we interact with technology. However, to train machines to sensibly detect and recognize human emotions, we need valid emotion ground truths. A fundamental challenge here is the momentary emotion elicitation and capture (MEEC) from individuals continuously and in real-time, without adversely affecting user experience. In this first edition of the one-day CHI 2020 workshop, we will (a) explore and define novel elicitation tasks, (b) survey sensing and annotation techniques, and (c) create a taxonomy of when and where to apply an elicitation method.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
Next Steps in Human-Computer Integration
Florian 'Floyd' Mueller (Monash University), Pedro Lopes (University of Chicago), Paul Strohmeier (University of Copenhagen / Saarland University), Wendy Ju (Cornell Tech), Caitlyn Seim (Stanford University), Martin Weigel (Honda Research Institute Europe), Suranga Nanayakkara (University of Auckland), Marianna Obrist (University of Essex), Zhuying Li (Monash University), Joseph Delfa (Monash University), Jun Nishida (University of Chicago), Elizabeth M. Gerber (Northwestern University), Dag Svanaes (NTNU / IT University of Copenhagen), Jonathan Grudin (Microsoft), Stefan Greuter (Deakin University), Kai Kunze (Keio University), Thomas Erickson (Independent researcher), Steven Greenspan (CA Technologies), Masahiko Inami (University of Tokyo), Joe Marshall (University of Nottingham), Harald Reiterer (University of Konstanz), Katrin Wolf (Beuth University of Applied Sciences Berlin), Jochen Meyer (OFFIS), Thecla Schiphorst (Simon Fraser University), Dakuo Wang (IBM Research), Pattie Maes (MIT Media Lab)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MuellerIntegration,
title = {Next Steps in Human-Computer Integration},
author = {Florian 'Floyd' Mueller (Monash University) and Pedro Lopes (University of Chicago) and Paul Strohmeier (University of Copenhagen / Saarland University) and Wendy Ju (Cornell Tech) and Caitlyn Seim (Stanford University) and Martin Weigel (Honda Research Institute Europe) and Suranga Nanayakkara (University of Auckland) and Marianna Obrist (University of Essex) and Zhuying Li (Monash University) and Joseph Delfa (Monash University) and Jun Nishida (University of Chicago) and Elizabeth M. Gerber (Northwestern University) and Dag Svanaes (NTNU / IT University of Copenhagen) and Jonathan Grudin (Microsoft) and Stefan Greuter (Deakin University) and Kai Kunze (Keio University) and Thomas Erickson (Independent researcher) and Steven Greenspan (CA Technologies) and Masahiko Inami (University of Tokyo) and Joe Marshall (University of Nottingham) and Harald Reiterer (University of Konstanz) and Katrin Wolf (Beuth University of Applied Sciences Berlin) and Jochen Meyer (OFFIS) and Thecla Schiphorst (Simon Fraser University) and Dakuo Wang (IBM Research) and Pattie Maes (MIT Media Lab)},
doi = {10.1145/3313831.3376242},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Human-Computer Integration (HInt) is an emerging paradigm in which computational and human systems are closely interwoven. Integrating computers with the human body is not new. However, we believe that with rapid technological advancements, increasing real-world deployments, and growing ethical and societal implications, it is critical to identify an agenda for future research. We present a set of challenges for HInt research, formulated over the course of a five-day workshop consisting of 29 experts who have designed, deployed, and studied HInt systems. This agenda aims to guide researchers in a structured way towards a more coordinated and conscientious future of human-computer integration.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
NurseCare: Design and ‘In-The-Wild’ Evaluation of a Mobile System to Promote the Ergonomic Transfer of Patients
Maximilian Dürr (University of Konstanz), Carla Gröschel (University of Konstanz), Ulrike Pfeil (University of Konstanz), Harald Reiterer (University of Konstanz)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{DuerrNurseCare,
title = {NurseCare: Design and ‘In-The-Wild’ Evaluation of a Mobile System to Promote the Ergonomic Transfer of Patients},
author = {Maximilian Dürr (University of Konstanz) and Carla Gröschel (University of Konstanz) and Ulrike Pfeil (University of Konstanz) and Harald Reiterer (University of Konstanz)},
url = {https://youtu.be/BJaKsSOjW4k, Video
https://www.twitter.com/HCIGroupKN, Twitter},
doi = {10.1145/3313831.3376851},
year = {2020},
date = {2020-04-26},
urldate = {2020-04-07},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
institution = {University of Konstanz},
abstract = {Nurses are frequently required to transfer patients as part of their daily duties. However, the manual transfer of patients is a major risk factor for injuries to the back. Although the Kinaesthetics Care Conception can help to address this issue, existing support for the integration of the concept into nursing-care practice is low. We present NurseCare, a mobile system that aims to promote the practical application of ergonomic patient transfers based on the Kinaesthetics Care Conception. NurseCare consists of a wearable and a smartphone app. Key features of NurseCare include mobile accessible instructions for ergonomic patient transfers, in-situ feedback for the risky bending of the back, and long-term feedback. We evaluated NurseCare in a nine participant ‘in-the-wild’ evaluation. Results indicate that NurseCare can facilitate ergonomic work while providing a high user experience adequate to the nurses’ work domain, and reveal how NurseCare can be incorporated in given practices.},
type = {Full Paper},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
On Conducting Security Developer Studies with CS Students: Examining a Password-Storage Study with CS Students, Freelancers, and Company Developers
Alena Naiakshina (University of Bonn), Anastasia Danilova (University of Bonn), Eva Gerlitz (Fraunhofer FKIE), Matthew Smith (University of Bonn / Fraunhofer FKIE)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{NaiakshinaConducting,
title = {On Conducting Security Developer Studies with CS Students: Examining a Password-Storage Study with CS Students, Freelancers, and Company Developers},
author = {Alena Naiakshina (University of Bonn) and Anastasia Danilova (University of Bonn) and Eva Gerlitz (Fraunhofer FKIE) and Matthew Smith (University of Bonn / Fraunhofer FKIE)},
doi = {10.1145/3313831.3376791},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
One does not Simply RSVP: Mental Workload to Select Speed Reading Parameters using Electroencephalography
Thomas Kosch (LMU Munich), Albrecht Schmidt (LMU Munich), Simon Thanheiser (LMU Munich), Lewis L. Chuang (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KoschRSVP,
title = {One does not Simply RSVP: Mental Workload to Select Speed Reading Parameters using Electroencephalography},
author = {Thomas Kosch (LMU Munich) and Albrecht Schmidt (LMU Munich) and Simon Thanheiser (LMU Munich) and Lewis L. Chuang (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376766},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Rapid Serial Visual Presentation (RSVP) has gained popularity as a method for presenting text on wearable devices with limited screen space. Nonetheless, it remains unclear how to calibrate RSVP display parameters, such as spatial alignments or presentation rates, to suit the reader’s information processing ability at high presentation speeds. Existing methods rely on comprehension and subjective workload scores, which are influenced by the user’s knowledge base and subjective perception. Here, we use electroencephalography (EEG) to directly determine how individual information processing varies with changes in RSVP display parameters. Eighteen participants read text excerpts with RSVP in a repeated-measures design that manipulated the Text Alignment and Presentation Speed of text representation. We evaluated how predictive EEG metrics were of gains in reading speed, subjective workload, and text comprehension. We found significant correlations between EEG and increasing Presentation Speeds and propose how EEG can be used for dynamic selection of RSVP parameters.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Opportunities and Challenges of Text Input in Portable Virtual Reality
Pascal Knierim (LMU Munich), Thomas Kosch (LMU Munich), Johannes Groschopp (LMU Munich), Albrecht Schmidt (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{KnierimOpportunities,
title = {Opportunities and Challenges of Text Input in Portable Virtual Reality},
author = {Pascal Knierim (LMU Munich) and Thomas Kosch (LMU Munich) and Johannes Groschopp (LMU Munich) and Albrecht Schmidt (LMU Munich)},
doi = {10.1145/3334480.3382920},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Text input in virtual reality is not widespread outside of labs, although being increasingly researched. Current setups require powerful components that are expensive or not portable, hence preventing effective in-the-wild use. Latest technological advances enable portable mixed reality experiences on smartphones. In this work, we propose a portable low-fidelity solution for text input in mixed reality on a physical keyboard that employs accessible off-the-shelf components. Through a user study with 24 participants, we show that our prototype leads to a significantly higher text input performance compared to soft keyboards. However, it falls behind on copy editing compared to soft keyboards. Qualitative inquiries revealed that participants enjoyed the ample display space and perceived the accompanied privacy as beneficial. Finally, we conclude with challenges and future research that builds upon the presented findings.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
ORCSolver: An Efficient Solver for Adaptive GUI Layout with OR-Constraints
Yue Jiang (University of Maryland / MPI / Saarland University), Wolfgang Stuerzlinger (Simon Fraser University), Matthias Zwicker (University of Maryland), Christof Lutteroth (University of Bath)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{JiangORCSolver,
title = {ORCSolver: An Efficient Solver for Adaptive GUI Layout with OR-Constraints},
author = {Yue Jiang (University of Maryland / MPI / Saarland University) and Wolfgang Stuerzlinger (Simon Fraser University) and Matthias Zwicker (University of Maryland) and Christof Lutteroth (University of Bath)},
doi = {10.1145/3313831.3376610},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Outline Pursuits: Gaze-assisted Selection of Occluded Objects in Virtual Reality
Ludwig Sidenmark (Lancaster University), Christopher Clarke (Lancaster University), Xuesong Zhang (Katholieke Universiteit Leuven), Jenny Phu (Ludwig Maximilian University of Munich), Hans Gellersen (Aarhus University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{SidenmarkOutline,
title = {Outline Pursuits: Gaze-assisted Selection of Occluded Objects in Virtual Reality},
author = {Ludwig Sidenmark (Lancaster University) and Christopher Clarke (Lancaster University) and Xuesong Zhang (Katholieke Universiteit Leuven) and Jenny Phu (Ludwig Maximilian University of Munich) and Hans Gellersen (Aarhus University)},
doi = {10.1145/3313831.3376438},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Performance and Experience of Throwing in Virtual Reality
Tim Zindulka (University of Bayreuth), Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{ZindulkaPerformance,
title = {Performance and Experience of Throwing in Virtual Reality},
author = {Tim Zindulka (University of Bayreuth) and Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
doi = {10.1145/3313831.3376639},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
PhysioSkin: Rapid Fabrication of Skin-Conformal Physiological Interfaces
Aditya Shekhar Nittala (Saarland University, Saarland Informatics Campus), Arshad Khan (Saarland University, Saarland Informatics Campus, INM-Leibniz Institute for New Materials, Saarbrücken), Klaus Kruttwig (INM-Leibniz Institute for New Materials, Saarbrücken), Tobias Kraus (INM-Leibniz Institute for New Materials, Saarbrücken), Jürgen Steimle (Saarland University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{NittalaPhysioSkin,
title = {PhysioSkin: Rapid Fabrication of Skin-Conformal Physiological Interfaces},
author = {Aditya Shekhar Nittala (Saarland University, Saarland Informatics Campus) and Arshad Khan (Saarland University, Saarland Informatics Campus, INM-Leibniz Institute for New Materials, Saarbrücken) and Klaus Kruttwig (INM-Leibniz Institute for New Materials, Saarbrücken) and Tobias Kraus (INM-Leibniz Institute for New Materials, Saarbrücken) and Jürgen Steimle (Saarland University)},
url = {https://www.youtube.com/watch?v=qC9kKDN8aW8, Video},
doi = {10.1145/3313831.3376366},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Advances in rapid prototyping platforms have made physiological sensing accessible to a wide audience. However, off-the-shelf electrodes commonly used for capturing biosignals are typically thick, non-conformal and do not support customization. We present PhysioSkin, a rapid, do-it-yourself prototyping method for fabricating custom multi-modal physiological sensors, using commercial materials and a commodity desktop inkjet printer. It realizes ultrathin skin-conformal patches (∼1 µm) and interactive textiles that capture sEMG, EDA and ECG signals. It further supports fabricating devices with custom levels of thickness and stretchability. We present detailed fabrication explorations on multiple substrate materials, functional inks and skin adhesive materials. Informed from the literature, we also provide design recommendations for each of the modalities. Evaluation results show that the sensor patches achieve a high signal-to-noise ratio. Example applications demonstrate the functionality and versatility of our approach for prototyping the next generation of physiological devices that intimately couple with the human body.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Platform for Studying Self-Repairing Auto-Corrections in Mobile Text Entry based on Brain Activity, Gaze, and Context
Felix Putze (University of Bremen), Tilman Ihrig (University of Bremen), Tanja Schultz (University of Bremen), Wolfgang Stuerzlinger (Simon Fraser University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{PutzePlatform,
title = {Platform for Studying Self-Repairing Auto-Corrections in Mobile Text Entry based on Brain Activity, Gaze, and Context},
author = {Felix Putze (University of Bremen) and Tilman Ihrig (University of Bremen) and Tanja Schultz (University of Bremen) and Wolfgang Stuerzlinger (Simon Fraser University)},
doi = {10.1145/3313831.3376815},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Auto-correction is a standard feature of mobile text entry. While the performance of state-of-the-art auto-correct methods is usually relatively high, any errors that occur are cumbersome to repair, interrupt the flow of text entry, and challenge the user's agency over the process. In this paper, we describe a system that aims to automatically identify and repair auto-correction errors. This system comprises a multi-modal classifier for detecting auto-correction errors from brain activity, eye gaze, and context information, as well as a strategy to repair such errors by replacing the erroneous correction or suggesting alternatives. We integrated both parts in a generic Android component and thus present a research platform for studying self-repairing end-to-end systems. To demonstrate its feasibility, we performed a user study to evaluate the classification performance and usability of our approach.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
PneumoVolley: Pressure-based Haptic Feedback on the Head through Pneumatic Actuation
Sebastian Günther (TU Darmstadt), Dominik Schön (TU Darmstadt), Florian Müller (TU Darmstadt), Max Mühlhäuser (TU Darmstadt), Martin Schmitz (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{GuentherPneumo,
title = {PneumoVolley: Pressure-based Haptic Feedback on the Head through Pneumatic Actuation},
author = {Sebastian Günther (TU Darmstadt) and Dominik Schön (TU Darmstadt) and Florian Müller (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt) and Martin Schmitz (TU Darmstadt)},
url = {https://youtu.be/ZKnV8HrUx9M , Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3334480.3382916},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Haptic Feedback brings immersion and presence in Virtual Reality (VR) to the next level. While research proposes the usage of various tactile sensations, such as vibration or ultrasound approaches, the potential applicability of pressure feedback on the head is still underexplored. In this paper, we contribute concepts and design considerations for pressure-based feedback on the head through pneumatic actuation. As a proof-of-concept implementing our pressure-based haptics, we further present PneumoVolley: a VR experience similar to the classic Volleyball game but played with the head. In an exploratory user study with 9 participants, we evaluated our concepts and identified a significantly increased involvement compared to a no-haptics baseline along with high realism and enjoyment ratings using pressure-based feedback on the head in VR.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Podoportation: Foot-Based Locomotion in Virtual Reality
Julius von Willich (TU Darmstadt), Martin Schmitz (TU Darmstadt), Florian Müller (TU Darmstadt), Daniel Schmitt (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{WillichPodoportation,
title = {Podoportation: Foot-Based Locomotion in Virtual Reality},
author = {Julius von Willich (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Florian Müller (TU Darmstadt) and Daniel Schmitt (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
url = {https://youtu.be/HGP5MN_e-k0, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3313831.3376626},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual Reality (VR) allows for infinitely large environments. However, the physical traversable space is always limited by real-world boundaries. This discrepancy between physical and virtual dimensions renders traditional locomotion methods used in real world unfeasible. To alleviate these limitations, research proposed various artificial locomotion concepts such as teleportation, treadmills, and redirected walking. However, these concepts occupy the user's hands, require complex hardware or large physical spaces. In this paper, we contribute nine VR locomotion concepts for foot-based and hands-free locomotion, relying on the 3D position of the user's feet and the pressure applied to the sole as input modalities. We evaluate our concepts and compare them to state-of-the-art point & teleport technique in a controlled experiment with 20 participants. The results confirm the viability of our approaches for hands-free and engaging locomotion. Further, based on the findings, we contribute a wireless hardware prototype implementation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
PolySense: Augmenting Textiles with Electrical Functionality using In-Situ Polymerization
Cedric Honnet (MIT Media Lab), Hannah Perner-Wilson (Kobakant), Marc Teyssier (Télécom Paris), Bruno Fruchard (Saarland University, SIC), Jürgen Steimle (Saarland University), Ana C. Baptista (CENIMAT/I3N), Paul Strohmeier (Saarland University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{HonnetPolySense,
title = {PolySense: Augmenting Textiles with Electrical Functionality using In-Situ Polymerization},
author = {Cedric Honnet (MIT Media Lab) and Hannah Perner-Wilson (Kobakant) and Marc Teyssier (Télécom Paris) and Bruno Fruchard (Saarland University, SIC) and Jürgen Steimle (Saarland University) and Ana C. Baptista (CENIMAT/I3N) and Paul Strohmeier (Saarland University)},
doi = {10.1145/3313831.33768},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present a method for enabling arbitrary textiles to sense pressure and deformation: In-situ polymerization supports integration of piezoresistive properties at the material level, preserving a textile's haptic and mechanical characteristics. We demonstrate how to enhance a wide set of fabrics and yarns using only readily available tools. To further support customisation by the designer, we present methods for patterning, as needed to create circuits and sensors, and demonstrate how to combine areas of different conductance in one material. Technical evaluation results demonstrate the performance of sensors created using our method is comparable to off-the-shelf piezoresistive textiles. As application examples, we demonstrate rapid manufacturing of on-body interfaces, tie-dyed motion-capture clothing, and zippers that act as potentiometers.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Power Play: How the Need to Empower or Overpower Other Players Predicts Preferences in League of Legends
Susanne Poeller (University of Trier), Nicola Baumann (University of Trier), Regan Mandryk (University of Saskatchewan)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{PoellerPower,
title = {Power Play: How the Need to Empower or Overpower Other Players Predicts Preferences in League of Legends},
author = {Susanne Poeller (University of Trier) and Nicola Baumann (University of Trier) and Regan Mandryk (University of Saskatchewan)},
doi = {10.1145/3313831.3376193},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Predicting Mid-Air Interaction Movements and Fatigue Using Deep Reinforcement Learning
Noshaba Cheema (Max Planck Institute for Informatics / German Research Center for Artificial Intelligence (DFKI)), Laura Frey-Law (University of Iowa), Kourosh Naderi (Aalto University), Jaakko Lehtinen (Aalto University / NVIDIA Research), Philipp Slusallek (Saarland University / German Research Center for Artificial Intelligence (DFKI)), Perttu Hämäläinen (Aalto University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{CheemaPredicting,
title = {Predicting Mid-Air Interaction Movements and Fatigue Using Deep Reinforcement Learning},
author = {Noshaba Cheema (Max Planck Institute for Informatics / German Research Center for Artificial Intelligence (DFKI)) and Laura Frey-Law (University of Iowa) and Kourosh Naderi (Aalto University) and Jaakko Lehtinen (Aalto University / NVIDIA Research) and Philipp Slusallek (Saarland University / German Research Center for Artificial Intelligence (DFKI)) and Perttu Hämäläinen (Aalto University)},
doi = {10.1145/3313831.3376701},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {A common problem of mid-air interaction is excessive arm fatigue, known as the "Gorilla arm" effect. To predict and prevent such problems at a low cost, we investigate user testing of mid-air interaction without real users, utilizing biomechanically simulated AI agents trained using deep Reinforcement Learning (RL). We implement this in a pointing task and four experimental conditions, demonstrating that the simulated fatigue data matches human fatigue data. We also compare two effort models: 1) instantaneous joint torques commonly used in computer animation and robotics, and 2) the recent Three Compartment Controller (3CC-r) model from biomechanical literature. 3CC-r yields movements that are both more efficient and relaxed, whereas with instantaneous joint torques, the RL agent can easily generate movements that are quickly tiring or only reach the targets slowly and inaccurately. Our work demonstrates that deep RL combined with the 3CC-r provides a viable tool for predicting both interaction movements and user experience in silico, without users.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Quantification of Users' Visual Attention During Everyday Mobile Device Interactions
Mihai Bâce (ETH Zürich), Sander Staal (ETH Zürich), Andreas Bulling (University of Stuttgart)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{BaceQuantification,
title = {Quantification of Users' Visual Attention During Everyday Mobile Device Interactions},
author = {Mihai Bâce (ETH Zürich) and Sander Staal (ETH Zürich) and Andreas Bulling (University of Stuttgart)},
doi = {10.1145/3313831.3376449},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present the first real-world dataset and quantitative evaluation of visual attention of mobile device users in-situ, i.e. while using their devices during everyday routine. Understanding user attention is a core research challenge in mobile HCI but previous approaches relied on usage logs or self-reports that are only proxies and consequently neither reflect attention completely nor accurately. Our evaluations are based on Everyday Mobile Visual Attention (EMVA), a new 32-participant dataset containing around 472 hours of video snippets recorded over more than two weeks in real life using the front-facing camera as well as associated usage logs, interaction events, and sensor data. Using an eye contact detection method, we are the first to quantify the highly dynamic nature of everyday visual attention across users, mobile applications, and usage contexts. We discuss key insights from our analyses that highlight the potential and inform the design of future mobile attentive user interfaces.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes
Konstantin Klamka (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden), Jürgen Steimle (Saarland University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KlamkaRapid,
title = {Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes},
author = {Konstantin Klamka (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden) and Jürgen Steimle (Saarland University)},
url = {https://youtu.be/FyPcMLBXIm0, Video
https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3313831.3376220},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
institution = {TU Dresden},
abstract = {Rapid prototyping of interactive textiles is still challenging, since manual skills, several processing steps, and expert knowledge are involved. We present Rapid Iron-On User Interfaces, a novel fabrication approach for empowering designers and makers to enhance fabrics with interactive functionalities. It builds on heat-activated adhesive materials consisting of smart textiles and printed electronics, which can be flexibly ironed onto the fabric to create custom interface functionality. To support rapid fabrication in a sketching-like fashion, we developed a handheld dispenser tool for directly applying continuous functional tapes of desired length as well as discrete patches. We introduce versatile composition techniques that allow for creating complex circuits, utilizing commodity textile accessories and sketching custom-shaped I/O modules. We further contribute a comprehensive library of components for input, output, wiring and computing. Three example applications, results from technical experiments and expert reviews demonstrate the functionality, versatility and potential of this approach.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Recognizing Affiliation: Using Behavioural Traces to Predict the Quality of Social Interactions in Online Games
Julian Frommel (Ulm University / University of Saskatchewan), Valentin Sagl (University of Saskatchewan), Ansgar E. Depping (University of Saskatchewan), Colby Johanson (University of Saskatchewan), Matthew K. Miller (University of Saskatchewan), Regan L. Mandryk (University of Saskatchewan)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{FrommelRecognizing,
title = {Recognizing Affiliation: Using Behavioural Traces to Predict the Quality of Social Interactions in Online Games},
author = {Julian Frommel (Ulm University / University of Saskatchewan) and Valentin Sagl (University of Saskatchewan) and Ansgar E. Depping (University of Saskatchewan) and Colby Johanson (University of Saskatchewan) and Matthew K. Miller (University of Saskatchewan) and Regan L. Mandryk (University of Saskatchewan)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376446},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Online social interactions in multiplayer games can be supportive and positive or toxic and harmful; however, few methods can easily assess interpersonal interaction quality in games. We use behavioural traces to predict affiliation between dyadic strangers, facilitated through their social interactions in an online gaming setting. We collected audio, video, in-game, and self-report data from 23 dyads, extracted 75 features, trained Random Forest and Support Vector Machine models, and evaluated their performance predicting binary (high/low) as well as continuous affiliation toward a partner. The models can predict both binary and continuous affiliation with up to 79.1% accuracy (F1) and 20.1% explained variance (R2) on unseen data, with features based on verbal communication demonstrating the highest potential. Our findings can inform the design of multiplayer games and game communities, and guide the development of systems for matchmaking and mitigating toxic behaviour in online games.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Robustness of Eye Movement Biometrics Against Varying Stimuli and Varying Trajectory Length
Christoph Schröder (University of Bremen), Sahar Mahdie Klim Al Zaidawi (University of Bremen), Martin H.U. Prinzler (University of Bremen), Sebastian Maneth (University of Bremen), Gabriel Zachmann (University of Bremen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SchroederRobustness,
title = {Robustness of Eye Movement Biometrics Against Varying Stimuli and Varying Trajectory Length},
author = {Christoph Schröder (University of Bremen) and Sahar Mahdie Klim Al Zaidawi (University of Bremen) and Martin H.U. Prinzler (University of Bremen) and Sebastian Maneth (University of Bremen) and Gabriel Zachmann (University of Bremen)},
doi = {10.1145/3313831.3376534},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Recent results suggest that biometric identification based on humans’ eye movement characteristics can be used for authentication. In this paper, we present three new methods and benchmark them against the state-of-the-art. The best of our new methods improves the state-of-the-art performance by 5.9 percentage points. Furthermore, we investigate some of the factors that affect the robustness of the recognition rate of different classifiers on gaze trajectories, such as the type of stimulus and the tracking trajectory length. We find that the state-of-the-art method only works well when using the same stimulus for testing that was used for training. By contrast, our novel method more than doubles the identification accuracy for these transfer cases. Furthermore, we find that with only 90 seconds of eye tracking data, 86.7% accuracy can be achieved.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Sara, the Lecturer: Improving Learning in Online Education with a Scaffolding-Based Conversational Agent
Rainer Winkler (University of St. Gallen), Sebastian Hobert (University of Goettingen), Antti Salovaara (Aalto University), Matthias Söllner (University of Kassel), Jan Marco Leimeister (University of St. Gallen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{WinklerSara,
title = {Sara, the Lecturer: Improving Learning in Online Education with a Scaffolding-Based Conversational Agent},
author = {Rainer Winkler (University of St. Gallen) and Sebastian Hobert (University of Goettingen) and Antti Salovaara (Aalto University) and Matthias Söllner (University of Kassel) and Jan Marco Leimeister (University of St. Gallen)},
doi = {10.1145/3313831.3376781},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
SelfSustainableCHI: Self-Powered Sustainable Interfaces and Interactions
Yogesh Kumar Meena (Swansea University), Xing-Dong Yang (Dartmouth College), Markus Löchtefeld (Aalborg University), Matt Carnie (Swansea University), Niels Henze (University of Regensburg), Steve Hodges (Microsoft Research), Matt Jones (Swansea University), Nivedita Arora (Georgia Institute of Technology), Gregory D. Abowd (Georgia Institute of Technology)
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{KumarSelf,
title = {SelfSustainableCHI: Self-Powered Sustainable Interfaces and Interactions},
author = {Yogesh Kumar Meena (Swansea University) and Xing-Dong Yang (Dartmouth College) and Markus Löchtefeld (Aalborg University) and Matt Carnie (Swansea University) and Niels Henze (University of Regensburg) and Steve Hodges (Microsoft Research) and Matt Jones (Swansea University) and Nivedita Arora (Georgia Institute of Technology) and Gregory D. Abowd (Georgia Institute of Technology)},
doi = {10.1145/3334480.3375167},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The continued proliferation of computing devices comes with an ever-increasing energy requirement, both during production and use. As awareness of the global climate emergency increases, self-powered and sustainable (SelfSustainable) interactive devices are likely to play a valuable role. In this workshop we bring together researchers and practitioners from design, computer science, materials science, engineering and manufacturing industries working on this new area of endeavour. The workshop will provide a platform for participants to review and discuss challenges and opportunities associated with self-powered and sustainable interfaces and interactions, develop a design space and identify opportunities for future research.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
Should I Stay or Should I Go? Automated Vehicles in the Age of Climate Change
Shadan Sadeghian Borojeni (University of Siegen), Alexander Meschtscherjakov (University of Salzburg), Bastian Pfleging (Eindhoven University of Technology), Birsen Donmez (University of Toronto), Andreas Riener (Technische Hochschule Ingolstadt (THI)), Christian P. Janssen (Utrecht University), Andrew L. Kun (University of New Hampshire), Wendy Ju (Cornell Tech), Christian Remy (Aarhus University), Philipp Wintersberger (Technische Hochschule Ingolstadt (THI))
Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Workshop | Links:
@workshop{BorojeniShould,
title = {Should I Stay or Should I Go? Automated Vehicles in the Age of Climate Change},
author = {Shadan Sadeghian Borojeni (University of Siegen) and Alexander Meschtscherjakov (University of Salzburg) and Bastian Pfleging (Eindhoven University of Technology) and Birsen Donmez (University of Toronto) and Andreas Riener (Technische Hochschule Ingolstadt (THI)) and Christian P. Janssen (Utrecht University) and Andrew L. Kun (University of New Hampshire) and Wendy Ju (Cornell Tech) and Christian Remy (Aarhus University) and Philipp Wintersberger (Technische Hochschule Ingolstadt (THI))},
doi = {10.1145/3334480.3375162},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Will automated driving help or hurt our efforts to remedy climate change? The overall impact of transportation and mobility on the global ecosystem is clear: changes to that system can greatly affect climate outcomes. The design of mobility and automotive systems will influence key factors such as driving style, fuel choice, ride sharing, traffic patterns, and total mileage. However, to date, there are few research efforts that explicitly focus on these overlapping themes (automated driving & climate changes) within the HCI and AutomotiveUI communities. Our intention is to grow this community and awareness of the related problems. Specifically, in this workshop, we invite designers, researchers, and practitioners from the sustainable HCI, persuasive design, AutomotiveUI, and mobility communities to collaborate in finding ways to make future mobility more sustainable. Using embodied design improvisation and design fiction methods, we will explore the ways that systems affect behavior, which then affects the environment.},
keywords = {Workshop},
pubstate = {published},
tppubtype = {workshop}
}
SIGCHI Outstanding Dissertation Award: Shaping Material Experiences
Paul Strohmeier (University of Copenhagen / Saarland University, Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Other | Links:
@inproceedings{StrohmeierSigchi,
title = {SIGCHI Outstanding Dissertation Award: Shaping Material Experiences},
author = {Paul Strohmeier (University of Copenhagen / Saarland University, Saarland Informatics Campus)},
doi = {10.1145/3334480.3386152},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {When interacting with materials, we infer many of their properties through tactile stimuli. These stimuli are caused by our manual interaction with the material, they are therefore closely coupled to our actions. Similarly, if we are subjected to a vibrotactile stimulus with a frequency directly coupled to our actions, we do not experience vibration – instead we experience this as a material property. My thesis explores this phenomenon of ‘material experience’ in three parts. Part I contributes two novel devices, a flexible phone which provides haptic feedback as it is being deformed, and a system which can track a finger and simultaneously provide haptic feedback. Part II investigates how vibration is perceived, when coupled to motion: what are the effects of varying feedback parameters and what are the effects of different types of motion? Part III reflects and contextualizes the findings presented in the previous sections.
In this extended abstract I briefly outline the most important aspects of my thesis and questions I've left unanswered, while also reflecting on the writing process.},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}
Simo: Interactions with Distant Displays by Smartphones with Simultaneous Face and World Tracking
Teo Babic (BMW Group), Florian Perteneder (University of Applied Sciences Upper Austria), Harald Reiterer (University of Konstanz), Michael Haller (University of Applied Sciences Upper Austria)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{BabicSimo,
title = {Simo: Interactions with Distant Displays by Smartphones with Simultaneous Face and World Tracking},
author = {Teo Babic (BMW Group) and Florian Perteneder (University of Applied Sciences Upper Austria) and Harald Reiterer (University of Konstanz) and Michael Haller (University of Applied Sciences Upper Austria)},
url = {https://www.twitter.com/HCIGroupKN, Twitter},
doi = {10.1145/3334480.3382962},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The interaction with distant displays often demands complex, multi-modal inputs which need to be achieved with a very simple hardware solution so that users can perform rich inputs wherever they encounter a distant display. We present Simo, a novel approach that transforms a regular smartphone into a highly-expressive user motion tracking device and controller for distant displays. Both the front and back cameras of the smartphone are used simultaneously to track the user’s hand as well as head and body movements in real-world space and scale. In this work, we first define the possibilities for simultaneous face- and world-tracking using current off-the-shelf smartphones. Next, we present the implementation of a smartphone app enabling hand, head, and body motion tracking. Finally, we present a technical analysis outlining the possible tracking range.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Social Acceptability in HCI: A Survey of Methods, Measures, and Design Strategies
Marion Koelle (University of Oldenburg / Saarland University, Saarland Informatics Campus), Swamy Ananthanarayan (University of Oldenburg), Susanne Boll (University of Oldenburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KoelleSocial,
title = {Social Acceptability in HCI: A Survey of Methods, Measures, and Design Strategies},
author = {Marion Koelle (University of Oldenburg / Saarland University, Saarland Informatics Campus) and Swamy Ananthanarayan (University of Oldenburg) and Susanne Boll (University of Oldenburg)},
url = {https://www.twitter.com/hcioldenburg, Twitter},
doi = {10.1145/3313831.3376162},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {With the increasing ubiquity of personal devices, social acceptability of human-machine interactions has gained relevance and growing interest from the HCI community. Yet, there are no best practices or established methods for evaluating social acceptability. Design strategies for increasing social acceptability have been described and employed, but so far not been holistically appraised and evaluated. We offer a systematic literature analysis (N=69) of social acceptability in HCI and contribute a better understanding of current research practices, namely, methods employed, measures and design strategies. Our review identified an unbalanced distribution of study approaches, shortcomings in employed measures, and a lack of interweaving between empirical and artifact-creating approaches. The latter causes a discrepancy between design recommendations based on user research, and design strategies employed in artifact creation. Our survey lays the groundwork for a more nuanced evaluation of social acceptability, the development of best practices, and a future research agenda.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Social Technology Appropriation in Dementia: Investigating the Role of Caregivers in engaging People with Dementia with a Videogame-based Training System
David Unbehaun (University of Siegen), Konstantin Aal (University of Siegen), Daryoush Daniel Vaziri (Hochschule Bonn-Rhein-Sieg), Peter David Tolmie (University of Siegen), Rainer Wieching (University of Siegen), David Randall (University of Siegen), Volker Wulf (University of Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{UnbehaunSocial,
title = {Social Technology Appropriation in Dementia: Investigating the Role of Caregivers in engaging People with Dementia with a Videogame-based Training System},
author = {David Unbehaun (University of Siegen) and Konstantin Aal (University of Siegen) and Daryoush Daniel Vaziri (Hochschule Bonn-Rhein-Sieg) and Peter David Tolmie (University of Siegen) and Rainer Wieching (University of Siegen) and David Randall (University of Siegen) and Volker Wulf (University of Siegen)},
doi = {10.1145/3313831.3376648},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present here the outcomes of a 4-month evaluation of the individual, social and institutional impact of a videogame-based training system. The everyday behavior and interactions of 52 people with dementia (PwD) and 25 caregivers were studied qualitatively, focusing on the role played by caregivers in integrating the system into daily routines.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
TAGSwipe: Touch Assisted Gaze Swipe for Text Entry
Chandan Kumar (University of Koblenz), Ramin Hedeshy (University of Koblenz), Scott MacKenzie (York University), Steffen Staab (University of Stuttgart)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KumarTagswipe,
title = {TAGSwipe: Touch Assisted Gaze Swipe for Text Entry},
author = {Chandan Kumar (University of Koblenz) and Ramin Hedeshy (University of Koblenz) and Scott MacKenzie (York University) and Steffen Staab (University of Stuttgart)},
url = {https://www.twitter.com/AnalyticComp, Twitter},
doi = {10.1145/3313831.3376317},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The conventional dwell-based methods for text entry by gaze are typically slow and uncomfortable. A swipe-based method that maps gaze path into words offers an alternative. However, it requires the user to explicitly indicate the beginning and ending of a word, which is typically achieved by tedious gaze-only selection. This paper introduces TAGSwipe, a bi-modal method that combines the simplicity of touch with the speed of gaze for swiping through a word. The result is an efficient and comfortable dwell-free text entry method. In the lab study TAGSwipe achieved an average text entry rate of 15.46 wpm and significantly outperformed conventional swipe-based and dwell-based methods in efficacy and user satisfaction.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Telewalk: Towards Free and Endless Walking in Room-Scale Virtual Reality
Michael Rietzler (Ulm University), Martin Deubzer (Ulm University), Thomas Dreja (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{RietzlerTelewalk,
title = {Telewalk: Towards Free and Endless Walking in Room-Scale Virtual Reality},
author = {Michael Rietzler (Ulm University) and Martin Deubzer (Ulm University) and Thomas Dreja (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376821},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Natural navigation in VR is challenging due to spatial limitations. While Teleportation enables navigation within very small physical spaces and without causing motion sickness symptoms, it may reduce the feeling of presence and spatial awareness. Redirected walking (RDW), in contrast, allows users to naturally walk while staying inside a finite, but still very large, physical space. We present Telewalk, a novel locomotion approach that combines curvature and translation gains known from RDW research in a perceivable way. This combination enables Telewalk to be applied even within a physical space of 3m x 3m. Utilizing the head rotation as input device enables directional changes without any physical turns to keep the user always on an optimal circular path inside the real world while freely walking inside the virtual one. In a user study we found that even though motion sickness susceptible participants reported respective symptoms, Telewalk did result in stronger feelings of presence and immersion and was seen as more natural than Teleportation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
The Importance of Virtual Hands and Feet for Virtual Reality Climbing
Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Corinna Tasch (Saarland Informatics Campus), Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{KosmallaImportance,
title = {The Importance of Virtual Hands and Feet for Virtual Reality Climbing},
author = {Felix Kosmalla (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and André Zenner (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Corinna Tasch (Saarland Informatics Campus) and Florian Daiber (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Antonio Krüger (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
doi = {10.1145/3334480.3383067},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual reality (VR) climbing systems registering physical climbing walls with immersive virtual environments (IVEs) have been a focus of past research. Such systems can provide physical user experiences similar to climbing in (extreme) outdoor environments. While in the real world, climbers can always see their hands and feet, virtual representations of limbs need to be spatially tracked and accurately rendered in VR, which increases system complexity. In this work, we investigated the importance of integrating virtual representations of the climber's hands and/or feet in VR climbing systems. We present a basic solution to track, calibrate and represent the climber's hands and feet, and report the results of a user study, comparing the importance of virtual limb representations in terms of perceived hand and feet movement accuracy, and enjoyability of the VR climbing experience. Our study suggests that the inclusion of feet is more important than having a hand visualization.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
The Low/High Index of Pupillary Activity
Andrew Duchowski (Clemson University), Krzysztof Krejtz (SWPS University of Social Sciences and Humanities), Nina Gehrer (University of Tübingen), Tanya Bafna (Technical University of Denmark), Per Bækgaard (Technical University of Denmark)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper | Links:
@inproceedings{DuchowskiLowHigh,
title = {The Low/High Index of Pupillary Activity},
author = {Andrew Duchowski (Clemson University) and Krzysztof Krejtz (SWPS University of Social Sciences and Humanities) and Nina Gehrer (University of Tübingen) and Tanya Bafna (Technical University of Denmark) and Per Bækgaard (Technical University of Denmark)},
doi = {10.1145/3313831.3376394},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions
Christina Katsini (Human Opsis), Yasmeen Abdrabou (Bundeswehr University Munich), George E. Raptis (Human Opsis), Mohamed Khamis (University of Glasgow), Florian Alt (Bundeswehr University Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KatsiniTheRole,
title = {The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions},
author = {Christina Katsini (Human Opsis) and Yasmeen Abdrabou (Bundeswehr University Munich) and George E. Raptis (Human Opsis) and Mohamed Khamis (University of Glasgow) and Florian Alt (Bundeswehr University Munich)},
doi = {10.1145/3313831.3376840},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {For the past 20 years, researchers have investigated the use of eye tracking in security applications. We present a holistic view on gaze-based security applications. In particular, we canvassed the literature and classify the utility of gaze in security applications into a) authentication, b) privacy protection, and c) gaze monitoring during security critical tasks. This allows us to chart several research directions, most importantly 1) conducting field studies of implicit and explicit gaze-based authentication due to recent advances in eye tracking, 2) research on gaze-based privacy protection and gaze monitoring in security critical tasks which are under-investigated yet very promising areas, and 3) understanding the privacy implications of pervasive eye tracking. We discuss the most promising opportunities and most pressing challenges of eye tracking for security that will shape research in gaze-based security applications for the next decade.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
ThermalWear: Exploring Wearable On-chest Thermal Displays to Augment Voice Messages with Affect
Abdallah El Ali (Centrum Wiskunde & Informatica), Xingyu Yang (Delft University of Technology), Swamy Ananthanarayan (University of Oldenburg), Thomas Röggla (Centrum Wiskunde & Informatica), Jack Jansen (Centrum Wiskunde & Informatica), Jess Hartcher-O’Brien (Delft University of Technology), Kaspar Jansen (Delft University of Technology), Pablo Cesar (Centrum Wiskunde & Informatica / Delft University of Technology)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{AliThermal,
title = {ThermalWear: Exploring Wearable On-chest Thermal Displays to Augment Voice Messages with Affect},
author = {Abdallah El Ali (Centrum Wiskunde & Informatica) and Xingyu Yang (Delft University of Technology) and Swamy Ananthanarayan (University of Oldenburg) and Thomas Röggla (Centrum Wiskunde & Informatica) and Jack Jansen (Centrum Wiskunde & Informatica) and Jess Hartcher-O’Brien (Delft University of Technology) and Kaspar Jansen (Delft University of Technology) and Pablo Cesar (Centrum Wiskunde & Informatica / Delft University of Technology)},
doi = {10.1145/3313831.3376682},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Voice is a rich modality for conveying emotions, however emotional prosody production can be situationally or medically impaired. Since thermal displays have been shown to evoke emotions, we explore how thermal stimulation can augment perception of neutrally-spoken voice messages with affect. We designed ThermalWear, a wearable on-chest thermal display, then tested in a controlled study (N=12) the effects of fabric, thermal intensity, and direction of change. Thereafter, we synthesized 12 neutrally-spoken voice messages, validated (N=7) them, then tested (N=12) if thermal stimuli can augment their perception with affect. We found warm and cool stimuli (a) can be perceived on the chest, and quickly without fabric (4.7-5s) (b) do not incur discomfort (c) generally increase arousal of voice messages and (d) increase / decrease message valence, respectively. We discuss how thermal displays can augment voice perception, which can enhance voice assistants and support individuals with emotional prosody impairments.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Therminator: Understanding the Interdependency of Visual and On-Body Thermal Feedback in Virtual Reality
Sebastian Günther (TU Darmstadt), Florian Müller (TU Darmstadt), Dominik Schön (TU Darmstadt), Omar Elmoghazy (GUC), Martin Schmitz (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{GuentherTherminator,
title = {Therminator: Understanding the Interdependency of Visual and On-Body Thermal Feedback in Virtual Reality},
author = {Sebastian Günther (TU Darmstadt) and Florian Müller (TU Darmstadt) and Dominik Schön (TU Darmstadt) and Omar Elmoghazy (GUC) and Martin Schmitz (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
url = {https://www.youtube.com/watch?v=w9FnG1eoWD8&feature=youtu.be, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3313831.3376195},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Recent advances have made Virtual Reality (VR) more realistic than ever before. This improved realism is attributed to today's ability to increasingly appeal to human sensations, such as visual, auditory or tactile. While research also examines temperature sensation as an important aspect, the interdependency of visual and thermal perception in VR is still underexplored. In this paper, we propose Therminator, a thermal display concept that provides warm and cold on-body feedback in VR through heat conduction of flowing liquids with different temperatures. Further, we systematically evaluate the interdependency of different visual and thermal stimuli on the temperature perception of arm and abdomen with 25 participants. As part of the results, we found varying temperature perception depending on the stimuli, as well as increasing involvement of users during conditions with matching stimuli.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
TimeBOMB: An Interactive Game Station Showcasing the History of Computer Games
Severin Engert (Technische Universität Dresden), Remke Albrecht (Technische Universität Dresden), Constantin Amend (Technische Universität Dresden), Felix Meyer (Technische Universität Dresden)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Other | Links:
@inproceedings{EngertTime,
title = {TimeBOMB: An Interactive Game Station Showcasing the History of Computer Games},
author = {Severin Engert (Technische Universität Dresden) and Remke Albrecht (Technische Universität Dresden) and Constantin Amend (Technische Universität Dresden) and Felix Meyer (Technische Universität Dresden)},
url = {https://www.youtube.com/watch?v=NbUSgDcfgzA, Video
https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3334480.3381658},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present TimeBOMB, an interactive game station that enables players to experience the history of computers and video game development. They compete with each other playing an adaption of the classic game Bomberman. As a novelty, each of the four sides of the station represents a different time period with corresponding input and output modalities. They consist of an oscilloscope interface with self-made, analogue control dials, a text-based interface controlled by a keyboard, a 2D arcade interface controlled by a joystick, and a 3D interface controlled by a gamepad. These four styles resemble iconic examples from the history of computer games. The game’s art style also differs for each side accordingly.},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}
Toward Agile Situated Visualization: An Exploratory User Study
Leonel Merino (University of Stuttgart), Boris Sotomayor-Gómez (Ernst Strüngmann Institute for Neuroscience in Cooperation with Max Planck Society), Xingyao Yu (University of Stuttgart), Ronie Salgado (University of Chile), Alexandre Bergel (University of Chile), Michael Sedlmair (University of Stuttgart), Daniel Weiskopf (University of Stuttgart)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{MerinoToward,
title = {Toward Agile Situated Visualization: An Exploratory User Study},
author = {Leonel Merino (University of Stuttgart) and Boris Sotomayor-Gómez (Ernst Strüngmann Institute for Neuroscience in Cooperation with Max Planck Society) and Xingyao Yu (University of Stuttgart) and Ronie Salgado (University of Chile) and Alexandre Bergel (University of Chile) and Michael Sedlmair (University of Stuttgart) and Daniel Weiskopf (University of Stuttgart)},
doi = {10.1145/3334480.3383017},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We introduce AVAR, a prototypical implementation of an agile situated visualization (SV) toolkit targeting liveness, integration, and expressiveness. We report on results of an exploratory study with AVAR and seven expert users. In it, participants wore a Microsoft HoloLens device and used a Bluetooth keyboard to program a visualization script for a given dataset. To support our analysis, we (i) video recorded sessions, (ii) tracked users' interactions, and (iii) collected data of participants' impressions. Our prototype confirms that agile SV is feasible. That is, liveness boosted participants' engagement when programming an SV, and so, the sessions were highly interactive and participants were willing to spend much time using our toolkit (i.e., median ≥ 1.5 hours). Participants used our integrated toolkit to deal with data transformations, visual mappings, and view transformations without leaving the immersive environment. Finally, participants benefited from our expressive toolkit and employed multiple of the available features when programming an SV.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Towards a Design Space for External Communication of Autonomous Vehicles
Mark Colley (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{ColleyTowardsADesignSpace,
title = {Towards a Design Space for External Communication of Autonomous Vehicles},
author = {Mark Colley (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3334480.3382844},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Autonomous vehicles are about to enter the mass market and with it a complex socio-technical system including vulnerable road users such as pedestrians and cyclists. Communication from autonomous vehicles to vulnerable road users can ease their introduction and aid in understanding their intentions. Various modalities and messages to communicate have been proposed and evaluated. However, a concise design space building on work from communication theory is yet to be presented. Therefore, we want to share our work on such a design space consisting of 4 dimensions: Message Type, Modality, Locus, and Communication Participants.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Towards Inclusive External Communication of Autonomous Vehicles for Pedestrians with Vision Impairments
Mark Colley (Ulm University), Marcel Walch (Ulm University), Jan Gugenheimer (Ulm University), Ali Askari (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{ColleyTowards,
title = {Towards Inclusive External Communication of Autonomous Vehicles for Pedestrians with Vision Impairments},
author = {Mark Colley (Ulm University) and Marcel Walch (Ulm University) and Jan Gugenheimer (Ulm University) and Ali Askari (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/1L7zTJ86PE8, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376472},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {People with vision impairments (VIP) are among the most vulnerable road users in traffic. Autonomous vehicles are believed to reduce accidents but still demand some form of external communication signaling relevant information to pedestrians. Recent research on the design of vehicle-pedestrian communication (VPC) focuses strongly on concepts for a non-disabled population. Our work presents an inclusive user-centered design for VPC, beneficial for both vision impaired and seeing pedestrians. We conducted a workshop with VIP (N=6), discussing current issues in road traffic and comparing communication concepts proposed by literature. A thematic analysis unveiled two important themes: number of communicating vehicles and content (affecting duration). Subsequently, we investigated these in a second user study in virtual reality (N=33, 8 VIP) comparing the VPC between groups of abilities. We found that trust and understanding are enhanced and cognitive load is reduced when all relevant vehicles communicate; high content messages also reduce cognitive load.},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
Towards Progress Assessment for Adaptive Hints in Educational Virtual Reality Games
Tobias Drey (Ulm University), Pascal Jansen (Ulm University), Fabian Fischbach (Ulm University), Julian Frommel (University of Saskatchewan), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{DreyTowards,
title = {Towards Progress Assessment for Adaptive Hints in Educational Virtual Reality Games},
author = {Tobias Drey (Ulm University) and Pascal Jansen (Ulm University) and Fabian Fischbach (Ulm University) and Julian Frommel (University of Saskatchewan) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/3uW-NBEatTg, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3334480.3382789},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {One strength of educational games is their adaptivity to the individual learning progress. Methods to assess progress during gameplay are limited, especially in virtual reality (VR) settings, which show great potential for educational games because of their high immersion. We propose the concept of adaptive hints using progress assessment based on player behavior tracked through a VR-system's tracking capabilities. We implemented Social Engineer, a VR-based educational game teaching basic knowledge about social engineering (SE). In two user studies, we will evaluate the performance of the progress assessment and the effects of the intervention through adaptive hints on the players' experience and learning effects. This research can potentially benefit researchers and practitioners, who want to assess progress in educational games and leverage the real-time assessment for adaptive hint systems with the potential of improved player experience and learning outcomes.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Towards Wearable-based Hypoglycemia Detection and Warning in Diabetes
Martin Maritsch (Eidgenössische Technische Hochschule Zurich), Simon Föll (Karlsruhe Institute of Technology), Vera Lehmann (Inselspital, Bern University Hospital), Caterina Bérubé (Eidgenössische Technische Hochschule Zurich), Mathias Kraus (Eidgenössische Technische Hochschule Zurich), Stefan Feuerriegel (Eidgenössische Technische Hochschule Zurich), Tobias Kowatsch (University of St. Gallen / Eidgenössische Technische Hochschule Zurich), Thomas Züger (Inselspital, Bern University Hospital), Christoph Stettler (Inselspital, Bern University Hospital), Elgar Fleisch (Eidgenössische Technische Hochschule Zurich / University of St. Gallen), Felix Wortmann (University of St. Gallen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Late Breaking Work | Links:
@inproceedings{MaritschTowards,
title = {Towards Wearable-based Hypoglycemia Detection and Warning in Diabetes},
author = {Martin Maritsch (Eidgenössische Technische Hochschule Zurich) and Simon Föll (Karlsruhe Institute of Technology) and Vera Lehmann (Inselspital, Bern University Hospital) and Caterina Bérubé (Eidgenössische Technische Hochschule Zurich) and Mathias Kraus (Eidgenössische Technische Hochschule Zurich) and Stefan Feuerriegel (Eidgenössische Technische Hochschule Zurich) and Tobias Kowatsch (University of St. Gallen / Eidgenössische Technische Hochschule Zurich) and Thomas Züger (Inselspital, Bern University Hospital) and Christoph Stettler (Inselspital, Bern University Hospital) and Elgar Fleisch (Eidgenössische Technische Hochschule Zurich / University of St. Gallen) and Felix Wortmann (University of St. Gallen)},
doi = {10.1145/3334480.3382808},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
TRACTUS: Understanding and Supporting Source Code Experimentation in Hypothesis-Driven Data Science
Krishna Subramanian (RWTH), Johannes Maas (RWTH), Jan Borchers (RWTH)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{SubramanianTractus,
title = {TRACTUS: Understanding and Supporting Source Code Experimentation in Hypothesis-Driven Data Science},
author = {Krishna Subramanian (RWTH) and Johannes Maas (RWTH) and Jan Borchers (RWTH)},
url = {https://youtu.be/iP0aW731MUQ, Video},
doi = {10.1145/3313831.3376764},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Data scientists experiment heavily with their code, compromising code quality to obtain insights faster. We observed ten data scientists perform hypothesis-driven data science tasks, and analyzed their coding, commenting, and analysis practice. We found that they have difficulty keeping track of their code experiments. When revisiting exploratory code to write production code later, they struggle to retrace their steps and capture the decisions made and insights obtained, and have to rerun code frequently. To address these issues, we designed TRACTUS, a system extending the popular RStudio IDE that detects, tracks, and visualizes code experiments in hypothesis-driven data science tasks. TRACTUS helps recall decisions and insights by grouping code experiments into hypotheses, and structuring information like code execution output and documentation. Our user studies show how TRACTUS improves data scientists' workflows, and suggest additional opportunities for improvement. TRACTUS is available as an open source RStudio IDE addin at http://hci.rwth-aachen.de/tractus.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Transparency of CHI Research Artifacts: Results of a Self-Reported Survey
Chat Wacharamanotham (University of Zürich), Lukas Eisenring (University of Zürich), Steve Haroz (Université Paris-Saclay, Inria), Florian Echtler (Bauhaus-Universität Weimar)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Best Paper, Full Paper | Links:
@inproceedings{WacharamanothamTransparency,
title = {Transparency of CHI Research Artifacts: Results of a Self-Reported Survey},
author = {Chat Wacharamanotham (University of Zürich) and Lukas Eisenring (University of Zürich) and Steve Haroz (Université Paris-Saclay, Inria) and Florian Echtler (Bauhaus-Universität Weimar)},
doi = {10.1145/3313831.3376448},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Best Paper, Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Trust versus Privacy: Using Connected Car Data in Peer-to-Peer Carsharing
Paul Bossauer (University of Siegen), Thomas Neifer (Hochschule Bonn-Rhein-Sieg Sankt Augustin), Gunnar Stevens (University of Siegen), Christina Pakusch (University of Siegen)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{BossauerTrust,
title = {Trust versus Privacy: Using Connected Car Data in Peer-to-Peer Carsharing},
author = {Paul Bossauer (University of Siegen) and Thomas Neifer (Hochschule Bonn-Rhein-Sieg Sankt Augustin) and Gunnar Stevens (University of Siegen) and Christina Pakusch (University of Siegen)},
doi = {10.1145/3313831.3376555},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Trust is the lubricant of the sharing economy. This is true especially in peer-to-peer carsharing, in which one leaves a highly valuable good to a stranger in the hope of getting it back unscathed. Nowadays, ratings of other users are major mechanisms for establishing trust. To foster uptake of peer-to-peer carsharing, connected car technology opens new possibilities to support trust-building, e.g., by adding driving behavior statistics to users’ profiles. However, collecting such data intrudes into rentees' privacy. To explore the tension between the need for trust and privacy demands, we conducted three focus group and eight individual interviews. Our results show that connected car technologies can increase trust for car owners and rentees not only before but also during and after rentals. The design of such systems must allow a differentiation between information in terms of type, the context, and the negotiability of information disclosure.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Understanding the Heisenberg Effect of Spatial Interaction: A Selection Induced Error for Spatially Tracked Input Devices
Dennis Wolf (Ulm University), Jan Gugenheimer (Ulm University), Marco Combosch (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{WolfUnderstanding,
title = {Understanding the Heisenberg Effect of Spatial Interaction: A Selection Induced Error for Spatially Tracked Input Devices},
author = {Dennis Wolf (Ulm University) and Jan Gugenheimer (Ulm University) and Marco Combosch (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376876},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual and augmented reality head-mounted displays (HMDs) currently rely heavily on spatially tracked input devices (STID) for interaction. These STIDs are all prone to the phenomenon that a discrete input (e.g., button press) will disturb the position of the tracker, resulting in a different selection point during ray-cast interaction (Heisenberg Effect of Spatial Interaction). Besides the knowledge of its existence, there is currently a lack of a deeper understanding of its severity, structure and impact on throughput and angular error during a selection task. In this work, we present a formal evaluation of the Heisenberg effect and the impact of body posture, arm position and STID degrees of freedom on its severity. In a Fitts' law-inspired user study (N=16), we found that the Heisenberg effect is responsible for 30.45% of the overall errors occurring during a pointing task, but can be reduced by 25.4% using a correction function.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Understanding what you feel: A Mobile Audio-Tactile System for Graphics Used at Schools with Students with Visual Impairment
Giuseppe Melfi (KIT-SZS), Karin Müller (KIT-SZS), Thorsten Schwarz (KIT-SZS), Gerhard Jaworek (KIT-SZS), Rainer Stiefelhagen (KIT-SZS)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MelfiUnderstanding,
title = {Understanding what you feel: A Mobile Audio-Tactile System for Graphics Used at Schools with Students with Visual Impairment},
author = {Giuseppe Melfi (KIT-SZS) and Karin Müller (KIT-SZS) and Thorsten Schwarz (KIT-SZS) and Gerhard Jaworek (KIT-SZS) and Rainer Stiefelhagen (KIT-SZS)},
doi = {10.1145/3313831.3376508},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {A lot of information is nowadays presented graphically. However, students with blindness do not have access to visual information. Providing an alternative text is not always the appropriate solution as exploring graphics to discover information independently is a fundamental part of the learning process. In this work, we introduce a mobile audio-tactile learning environment, which facilitates the incorporation of real educational material. We evaluate our system by comparing three methods of interaction with tactile graphics: a tactile graphic augmented by (1) a document with key index information in Braille, (2) a digital document with key index information and (3) the TPad system, an audio-tactile solution meeting the specific needs within the school context. Our study shows that the TPad system is suitable for educational environments. Moreover, compared to the other methods, TPad allows faster exploration of tactile graphics and suggests a promising effect on the memorization of information.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Unveiling the Lack of Scalability in Research on External Communication of Autonomous Vehicles
Mark Colley (Ulm University), Marcel Walch (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{ColleyUnveiling,
title = {Unveiling the Lack of Scalability in Research on External Communication of Autonomous Vehicles},
author = {Mark Colley (Ulm University) and Marcel Walch (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3334480.3382865},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The traffic system is a complex network with numerous individuals (e.g., drivers, cyclists, and pedestrians) and vehicles involved. Road systems vary in aspects such as the number of lanes, right of way, and configuration. With the emergence of autonomous vehicles, this system will change. Research has already addressed the missing communication possibilities when no human driver is needed. However, there is no common evaluation standard for the proposed external communication concepts with respect to the complexity of the traffic system. We have therefore investigated the evaluation of these in Virtual Reality, in monitor-based, and in prototypical setups with special regard to scalability. We found that simulated traffic noise is a non-factor in current evaluations and that involving multiple people and multiple lanes with numerous vehicles is scarce.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Using Hexad User Types to Select Suitable Gamification Elements to Encourage Healthy Eating
Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Linda Muller (Saarland University, Saarland Informatics Campus), Antonio Krüger (Saarland University, German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{AltmeyerUsing,
title = {Using Hexad User Types to Select Suitable Gamification Elements to Encourage Healthy Eating},
author = {Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Linda Muller (Saarland University, Saarland Informatics Campus) and Antonio Krüger (Saarland University, German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
doi = {10.1145/3334480.3383011},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Given that an increasing number of people cultivate poor eating habits, encouraging people to eat healthy is important. One way to motivate people to eat healthy is gamification, i.e., using game elements in a non-game context. Often, a static set of gamification elements is used. However, research suggests that the motivational impact of gamification elements differs substantially across users, demanding personalized approaches. In this paper, we contribute to this by investigating the perception of frequently used gamification elements in the healthy eating domain and correlations to Hexad user types in an online study (N=237). To do so, we created storyboards illustrating these gamification elements and show their comprehensibility in a lab study (N=8). Our results validate and extend previous research in the healthy eating domain, underline the need for personalization and could be used to inform the design of gamified systems for healthy eating.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Vibrotactile Funneling Illusion and Localization Performance on the Head
Oliver Beren Kaul (Leibniz University Hannover), Michael Rohs (Leibniz University Hannover), Benjamin Simon (Leibniz University Hannover), Kerem Can Demir (Leibniz University Hannover), Kamillo Ferry (Leibniz University Hannover)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KaulVibrotactile,
title = {Vibrotactile Funneling Illusion and Localization Performance on the Head},
author = {Oliver Beren Kaul (Leibniz University Hannover) and Michael Rohs (Leibniz University Hannover) and Benjamin Simon (Leibniz University Hannover) and Kerem Can Demir (Leibniz University Hannover) and Kamillo Ferry (Leibniz University Hannover)},
url = {https://youtu.be/emySptGIP9Y, Video},
doi = {10.1145/3313831.3376335},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The vibrotactile funneling illusion is the sensation of a single (non-existing) stimulus somewhere in-between the actual stimulus locations. Its occurrence depends upon body location, distance between the actuators, signal synchronization, and intensity. Related work has shown that the funneling illusion may occur on the forehead. We were able to reproduce these findings and explored five further regions to get a more complete picture of the occurrence of the funneling illusion on the head. The results of our study (24 participants) show that the actuator distance, for which the funneling illusion occurs, strongly depends upon the head region. Moreover, we evaluated the centralizing bias (smaller perceived than actual actuator distances) for different head regions, which also showed widely varying characteristics. We computed a detailed heat map of vibrotactile localization accuracies on the head. The results inform the design of future tactile head-mounted displays that aim to support the funneling illusion.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality
Ville Mäkelä (LMU Munich / Tampere University), Rivu Radiah (Bundeswehr University Munich), Saleh Alsherif (German University in Cairo), Mohamed Khamis (University of Glasgow), Chong Xiao (LMU Munich), Lisa Borchert (LMU Munich), Albrecht Schmidt (LMU Munich), Florian Alt (Bundeswehr University Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{VilleVirtual,
title = {Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality},
author = {Ville Mäkelä (LMU Munich / Tampere University) and Rivu Radiah (Bundeswehr University Munich) and Saleh Alsherif (German University in Cairo) and Mohamed Khamis (University of Glasgow) and Chong Xiao (LMU Munich) and Lisa Borchert (LMU Munich) and Albrecht Schmidt (LMU Munich) and Florian Alt (Bundeswehr University Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3313831.3376796},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Field studies on public displays can be difficult, expensive, and time-consuming. We investigate the feasibility of using virtual reality (VR) as a test-bed to evaluate deployments of public displays. Specifically, we investigate whether results from virtual field studies, conducted in a virtual public space, would match the results from a corresponding real-world setting. We report on two empirical user studies where we compared audience behavior around a virtual public display in the virtual world to audience behavior around a real public display. We found that virtual field studies can be a powerful research tool, as in both studies we observed largely similar behavior between the settings. We discuss the opportunities, challenges, and limitations of using virtual reality to conduct field studies, and provide lessons learned from our work that can help researchers decide whether to employ VR in their research and what factors to account for if doing so.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Virtual Reality for Individuals with Occasional Paranoid Thoughts
Leonie Ascone (University Medical Center Hamburg-Eppendorf), Karolin Ney (Universität Hamburg), Fariba Mostajeran (Universität Hamburg), Frank Steinicke (Universität Hamburg), Steffen Moritz (University Medical Center Hamburg-Eppendorf), Jürgen Gallinat (University Medical Center Hamburg-Eppendorf), Simone Kühn (University Medical Center Hamburg-Eppendorf)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{AsconeVirtual,
title = {Virtual Reality for Individuals with Occasional Paranoid Thoughts},
author = {Leonie Ascone (University Medical Center Hamburg-Eppendorf) and Karolin Ney (Universität Hamburg) and Fariba Mostajeran (Universität Hamburg) and Frank Steinicke (Universität Hamburg) and Steffen Moritz (University Medical Center Hamburg-Eppendorf) and Jürgen Gallinat (University Medical Center Hamburg-Eppendorf) and Simone Kühn (University Medical Center Hamburg-Eppendorf)},
doi = {10.1145/3334480.3382918},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Individuals with paranoia often experience a high level of self-criticism and negative emotions. Guided compassion-focused (CF) imagery has been shown to be successful in reducing these negative emotions and paranoid thoughts. However, some individuals have difficulties with CF imagery. By enabling a sense of presence, immersive virtual environments can overcome these limitations and induce specific emotional responses to support the development of self-compassionate feelings. In our study, we compared an immersive CF condition (CF-VR) with a control VR condition in a student sample of N = 21 participants with slightly elevated symptoms of paranoia. A virtual mission on the moon was designed and implemented to induce self-compassionate feelings by having participants interact with a space nebula that represented the power of compassion. Our results show that the CF-VR intervention was well accepted and effective in reducing state paranoid thoughts. Worry decreased significantly within the CF-VR group, while self-compassion increased.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Virtual Reality Without Vision: A Haptic and Auditory White Cane to Navigate Complex Virtual Worlds
Alexa Siu (Stanford University), Mike Sinclair (Microsoft Research), Robert Kovacs (Hasso Plattner Institute), Eyal Ofek (Microsoft Research), Christian Holz (Microsoft Research), Edward Cutrell (Microsoft Research)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{SiuVirtual,
title = {Virtual Reality Without Vision: A Haptic and Auditory White Cane to Navigate Complex Virtual Worlds},
author = {Alexa Siu (Stanford University) and Mike Sinclair (Microsoft Research) and Robert Kovacs (Hasso Plattner Institute) and Eyal Ofek (Microsoft Research) and Christian Holz (Microsoft Research) and Edward Cutrell (Microsoft Research)},
doi = {10.1145/3313831.3376353},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper, Honorable Mention},
pubstate = {published},
tppubtype = {inproceedings}
}
VRSketchIn: Exploring the Design Space of Pen and Tablet Interaction for 3D Sketching in Virtual Reality
Tobias Drey (Ulm University), Jan Gugenheimer (Ulm University), Julian Karlbauer (Ulm University), Maximilian Milo (Ulm University), Enrico Rukzio (Ulm University)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{DreyVRSketchIn,
title = {VRSketchIn: Exploring the Design Space of Pen and Tablet Interaction for 3D Sketching in Virtual Reality},
author = {Tobias Drey (Ulm University) and Jan Gugenheimer (Ulm University) and Julian Karlbauer (Ulm University) and Maximilian Milo (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/99hIlAbfan4, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376628},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Sketching in virtual reality (VR) enhances perception and understanding of 3D volumes, but is currently a challenging task, as spatial input devices (e.g., tracked controllers) do not provide any scaffolding or constraints for mid-air interaction. We present VRSketchIn, a VR sketching application using a 6DoF-tracked pen and a 6DoF-tracked tablet as input devices, combining unconstrained 3D mid-air with constrained 2D surface-based sketching. To explore what possibilities arise from this combination of 2D (pen on tablet) and 3D input (6DoF pen), we present a set of design dimensions and define the design space for 2D and 3D sketching interaction metaphors in VR. We categorize prior art inside our design space and implemented a subset of metaphors for pen and tablet sketching in our prototype. To gain a deeper understanding which specific sketching operations users perform with 2D and which with 3D metaphors, we present findings of usability walkthroughs with six participants.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
VRsneaky: Stepping into an Audible Virtual World with Gait-Aware Auditory Feedback
Felix Dietz (LMU Munich), Matthias Hoppe (LMU Munich), Jakob Karolus (LMU Munich), Paweł W. Woźniak (Utrecht University), Albrecht Schmidt (LMU Munich), Tonja Machulla (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Interactivity/Demonstration | Links:
@inproceedings{DietzVR,
title = {VRsneaky: Stepping into an Audible Virtual World with Gait-Aware Auditory Feedback},
author = {Felix Dietz (LMU Munich) and Matthias Hoppe (LMU Munich) and Jakob Karolus (LMU Munich) and Paweł W. Woźniak (Utrecht University) and Albrecht Schmidt (LMU Munich) and Tonja Machulla (LMU Munich)},
url = {https://www.dropbox.com/s/skz61keimjm5hso/VRsneaky.mp4?dl=0, Video
https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3334480.3383168},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {New VR experiences allow users to walk extensively in the virtual space. Bigger tracking spaces, treadmills and redirected walking solutions are now available. Yet, certain connections to the user's movement are still not made. Here, we specifically see a shortcoming of representations of locomotion feedback in state-of-the-art VR setups. As shown in our paper VRsneaky, providing synchronized step sounds is important for involving the user further into the experience and virtual world, but is often neglected. VRsneaky detects the user's gait and plays synchronized gait-aware step sounds accordingly by attaching force sensing resistors (FSR) and accelerometers to the user's shoe. In an exciting bank robbery, the user will try to rob the bank behind a guard's back. The tension will increase as the user has to be aware of each step in this atmospheric experience. Each step will remind the user to pay attention to every movement, as each step will be represented using adaptive step sounds resulting in different noise levels.},
keywords = {Interactivity/Demonstration},
pubstate = {published},
tppubtype = {inproceedings}
}
Walk The Line: Leveraging Lateral Shifts of the Walking Path as an Input Modality for Head-Mounted Displays
Florian Müller (TU Darmstadt), Martin Schmitz (TU Darmstadt), Daniel Schmitt (TU Darmstadt), Sebastian Günther (TU Darmstadt), Markus Funk (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{MuellerWalk,
title = {Walk The Line: Leveraging Lateral Shifts of the Walking Path as an Input Modality for Head-Mounted Displays},
author = {Florian Müller (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Daniel Schmitt (TU Darmstadt) and Sebastian Günther (TU Darmstadt) and Markus Funk (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
url = {https://youtu.be/6-XrF6J9cTc, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
doi = {10.1145/3313831.3376852},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Recent technological advances have made head-mounted displays (HMDs) smaller and untethered, fostering the vision of ubiquitous interaction in a digitally augmented physical world. Consequently, a major part of the interaction with such devices will happen on the go, calling for interaction techniques that allow users to interact while walking. In this paper, we explore lateral shifts of the walking path as a hands-free input modality. The available input options are visualized as lanes on the ground parallel to the user's walking path. Users can select options by shifting the walking path sideways to the respective lane. We contribute the results of a controlled experiment with 18 participants, confirming the viability of our approach for fast, accurate, and joyful interactions. Further, based on the findings of the controlled experiment, we present three example applications.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Walking by Cycling: A Novel In-Place Locomotion User Interface for Seated Virtual Reality Experiences
Jann Philipp Freiwald (Uni Hamburg), Oscar Ariza (Uni Hamburg), Omar Janeh (Uni Hamburg), Frank Steinicke (Uni Hamburg)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{FreiwaldWalking,
title = {Walking by Cycling: A Novel In-Place Locomotion User Interface for Seated Virtual Reality Experiences},
author = {Jann Philipp Freiwald (Uni Hamburg) and Oscar Ariza (Uni Hamburg) and Omar Janeh (Uni Hamburg) and Frank Steinicke (Uni Hamburg)},
doi = {10.1145/3313831.3376574},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We introduce VR Strider, a novel locomotion user interface (LUI) for seated virtual reality (VR) experiences, which maps cycling biomechanics of the user's legs to virtual walking movements. The core idea is to translate the motion of pedaling on a mini exercise bike to a corresponding walking animation of a virtual avatar while providing audio-based tactile feedback on virtual ground contacts. We conducted an experiment to evaluate the LUI and our novel anchor-turning rotation control method regarding task performance, spatial cognition, VR sickness, sense of presence, usability and comfort in a path-integration task. The results show that VR Strider has a significant positive effect on the participants' angular and distance estimation, sense of presence and feeling of comfort compared to other established locomotion techniques, such as teleportation and joystick-based navigation. A confirmatory study further indicates the necessity of synchronized avatar animations for virtual vehicles that rely on pedaling.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Watch my Painting: The Back of the Hand as a Drawing Space for Smartwatches
Maximilian Schrapel (Leibniz University Hannover), Florian Herzog (Leibniz University Hannover), Steffen Ryll (Leibniz University Hannover), Michael Rohs (Leibniz University Hannover)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{SchrapelWatch,
title = {Watch my Painting: The Back of the Hand as a Drawing Space for Smartwatches},
author = {Maximilian Schrapel (Leibniz University Hannover) and Florian Herzog (Leibniz University Hannover) and Steffen Ryll (Leibniz University Hannover) and Michael Rohs (Leibniz University Hannover)},
url = {https://youtu.be/39V9im4Bm5Q, Video},
doi = {10.1145/3334480.3383040},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {"Smartwatches can be used independently from smartphones, but input tasks like messaging are cumbersome due to the small display size. Parts of the display are hidden during interaction, which can lead to incorrect input. For simplicity, instead of general text input a small set of answer options are often provided, but these are limited and impersonal. In contrast, free-form drawings can answer messages in a very personal way, but are difficult to produce on small displays.
To enable precise drawing input on smartwatches we present a magnetic stylus that is tracked on the back of the hand. In an evaluation of several algorithms we show that 3D position estimation with a 7.5x20mm magnet reaches a worst-case 6% relative position error on the back of the hand. Furthermore, the results of a user study are presented, which show that in the case of drawing applications the presented technique is faster and more precise than direct finger input."},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
Watch+Strap: Extending Smartwatches with Interactive StrapDisplays
Konstantin Klamka (Technische Universität Dresden), Tom Horak (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Full Paper | Links:
@inproceedings{KlamkaWatchStrap,
title = {Watch+Strap: Extending Smartwatches with Interactive StrapDisplays},
author = {Konstantin Klamka (Technische Universität Dresden) and Tom Horak (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden)},
url = {https://youtu.be/Op8-gh5GSxI, Video
https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3313831.3376199},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
institution = {TU Dresden},
abstract = {While smartwatches are widely adopted these days, their input and output space remains fairly limited by their screen size. We present StrapDisplays—interactive watchbands with embedded display and touch technologies—that enhance commodity watches and extend their input and output capabilities. After introducing the physical design space of these StrapDisplays, we explore how to combine a smartwatch and straps in a synergistic Watch+Strap system. Specifically, we propose multiple interface concepts that consider promising content distributions, interaction techniques, usage types, and display roles. For example, the straps can enrich watch apps, display visualizations, provide glanceable feedback, or help avoiding occlusion issues. Further, we provide a modular research platform incorporating three StrapDisplay prototypes and a flexible web-based software architecture, demonstrating the feasibility of our approach. Early brainstorming sessions with 15 participants informed our design process, while later interviews with six experts supported our concepts and provided valuable feedback for future developments.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
What does the Oscilloscope Say?: Comparing the Efficiency of In-Situ Visualisations during Circuit Analysis
Adam Nowak (LMU Munich), Pascal Knierim (LMU Munich), Andrzej Romanowski (LMU Munich), Albrecht Schmidt (LMU Munich), Thomas Kosch (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Late Breaking Work | Links:
@inproceedings{NowakWhat,
title = {What does the Oscilloscope Say?: Comparing the Efficiency of In-Situ Visualisations during Circuit Analysis},
author = {Adam Nowak (LMU Munich) and Pascal Knierim (LMU Munich) and Andrzej Romanowski (LMU Munich) and Albrecht Schmidt (LMU Munich) and Thomas Kosch (LMU Munich)},
doi = {10.1145/3334480.3382890},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Traditional measuring devices separate probes from their data visualisation, requiring the operator to switch attention between their metering and result frequently. We explored the efficiency of four different visualisation modalities during a circuit analysis task that utilises the output of an oscilloscope. We argue that the spatial alignment of an oscilloscope's display and probe interferes with the cognitive processing of data visualisations, hence increasing the probability of errors and required time. We compared a fixed-placed oscilloscope, in-situ projections, user-positioned tablets, and a head-mounted display while measuring completion times, subjective workload, number of errors, and personal preferences after each task. Results indicate that the oscilloscope produced the lowest completion time compared to other modalities. However, visualising data on a user-positioned tablet or through in-situ projections yielded lower subjective workload and a lower number of errors. We discuss how our work generalises for assistive systems that support practitioners during their training in circuit analysis.},
keywords = {Late Breaking Work},
pubstate = {published},
tppubtype = {inproceedings}
}
WindowWall: Towards Adaptive Buildings with Interactive Windows As Ubiquitous Displays
Patrick Bader (University of Stuttgart), Alexandra Voit (adesso AG), Huy Viet Le (University of Stuttgart), Paweł W. Woźniak (Utrecht University), Niels Henze (University of Regensburg), Albrecht Schmidt (LMU Munich)
In: Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020, ACM, 2020.
Abstract | Tags: Other | Links:
@inproceedings{BaderWindow,
title = {WindowWall: Towards Adaptive Buildings with Interactive Windows As Ubiquitous Displays},
author = {Patrick Bader (University of Stuttgart) and Alexandra Voit (adesso AG) and Huy Viet Le (University of Stuttgart) and Paweł W. Woźniak (Utrecht University) and Niels Henze (University of Regensburg) and Albrecht Schmidt (LMU Munich)},
url = {https://www.twitter.com/mimuc, Twitter},
doi = {10.1145/3310275},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {As architects usually decide on the shape and look of windows during the design of buildings, opportunities for interactive windows have not been systematically explored yet. In this work, we extend the vision of sustainable and comfortable adaptive buildings using interactive smart windows. We systematically explore the design space of interactive windows to chart requirements, constraints, and challenges. To that end, we built proof-of-concept prototypes of smart windows with fine-grained control of transparency. In two studies, we explored user attitudes towards interactive windows and elicited control methods. We found that users understand and see potential for interactive windows at home. We provide specific usage contexts and specify interactions that may facilitate domestic applications. Our work illustrates the concept of interactive smart windows and provides insights regarding their design, development, and user controls for adaptive walls. We identify design dimensions and challenges to stimulate further development in the domain of adaptive buildings.},
keywords = {Other},
pubstate = {published},
tppubtype = {inproceedings}
}