2020 in Numbers
This year, the German labs contribute 138 publications in total to the 2020 ACM CHI Conference on Human Factors in Computing Systems. At the heart, there are 83 Papers, including 1 Best Paper and 14 Honorable Mentions. Further, we bring 34 Late-Breaking Works, 5 Demonstrations, 7 organized Workshops & Symposia, 2 Case Studies, 2 Journal Articles, 1 SIG, 1 SIGCHI Outstanding Dissertation Award and 1 Student Game Competition to CHI this year. All these publications are listed below.
'It’s in my other hand!' - Studying the Interplay of Interaction Techniques and Multi-Tablet Activities
Johannes Zagermann (University of Konstanz), Ulrike Pfeil (University of Konstanz), Philipp von Bauer (University of Konstanz), Daniel Fink (University of Konstanz), Harald Reiterer (University of Konstanz)
Abstract | Tags: Full Paper | Links:
@inproceedings{ZagermannStudying,
  author    = {Johannes Zagermann (University of Konstanz) and Ulrike Pfeil (University of Konstanz) and Philipp von Bauer (University of Konstanz) and Daniel Fink (University of Konstanz) and Harald Reiterer (University of Konstanz)},
  title     = {'It’s in my other hand!' - Studying the Interplay of Interaction Techniques and Multi-Tablet Activities},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376540},
  url       = {https://youtu.be/_LZsSPP1FM4, Video
https://www.twitter.com/HCIGroupKN, Twitter},
  abstract  = {Cross-device interaction with tablets is a popular topic in HCI research. Recent work has shown the benefits of including multiple devices into users’ workflows while various interaction techniques allow transferring content across devices. However, users are only reluctantly using multiple devices in combination. At the same time, research on cross-device interaction struggles to find a frame of reference to compare techniques or systems. In this paper, we try to address these challenges by studying the interplay of interaction techniques, device utilization, and task-specific activities in a user study with 24 participants from different but complementary angles of evaluation using an abstract task, a sensemaking task, and three interaction techniques. We found that different interaction techniques have a lower influence than expected, that work behaviors and device utilization depend on the task at hand, and that participants value specific aspects of cross-device interaction.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

3D-Auth: Two-Factor Authentication with Personalized 3D-Printed Items
Karola Marky (TU Darmstadt), Martin Schmitz (TU Darmstadt), Verena Zimmer (TU Darmstadt), Martin Herbers (TU Darmstadt), Kai Kunze (Keio Media Design), Max Mühlhäuser (TU Darmstadt)
Abstract | Tags: Full Paper | Links:
@inproceedings{Marky3D,
  author    = {Karola Marky (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Verena Zimmer (TU Darmstadt) and Martin Herbers (TU Darmstadt) and Kai Kunze (Keio Media Design) and Max Mühlhäuser (TU Darmstadt)},
  title     = {3D-Auth: Two-Factor Authentication with Personalized 3D-Printed Items},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376189},
  url       = {https://youtu.be/_dHihnJTRek, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
  abstract  = {Two-factor authentication is a widely recommended security mechanism and already offered for different services. However, known methods and physical realizations exhibit considerable usability and customization issues. In this paper, we propose 3D-Auth, a new concept of two-factor authentication. 3D-Auth is based on customizable 3D-printed items that combine two authentication factors in one object. The object bottom contains a uniform grid of conductive dots that are connected to a unique embedded structure inside the item. Based on the interaction with the item, different dots turn into touch-points and form an authentication pattern. This pattern can be recognized by a capacitive touchscreen. Based on an expert design study, we present an interaction space with six categories of possible authentication interactions. In a user study, we demonstrate the feasibility of 3D-Auth items and show that the items are easy to use and the interactions are easy to remember.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

A Human Touch: Social Touch Increases the Perceived Human-likeness of Agents in Virtual Reality
Matthias Hoppe (LMU Munich), Beat Rossmy (LMU Munich), Daniel Peter Neumann (LMU Munich), Stephan Streuber (University of Konstanz), Albrecht Schmidt (LMU Munich), Tonja Machulla (LMU Munich)
Abstract | Tags: Full Paper | Links:
@inproceedings{HoppeAHumanTouch,
  author    = {Matthias Hoppe (LMU Munich) and Beat Rossmy (LMU Munich) and Daniel Peter Neumann (LMU Munich) and Stephan Streuber (University of Konstanz) and Albrecht Schmidt (LMU Munich) and Tonja Machulla (LMU Munich)},
  title     = {A Human Touch: Social Touch Increases the Perceived Human-likeness of Agents in Virtual Reality},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376719},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {Virtual Reality experiences and games present believable virtual environments based on graphical quality, spatial audio, and interactivity. The interaction with in-game characters, controlled by computers (agents) or humans (avatars), is an important part of VR experiences. Pre-captured motion sequences increase the visual humanoid resemblance. However, this still precludes realistic social interactions (eye contact, imitation of body language), particularly for agents. We aim to make social interaction more realistic via social touch. Social touch is non-verbal, conveys feelings and signals (coexistence, closure, intimacy). In our research, we created an artificial hand to apply social touch in a repeatable and controlled fashion to investigate its effect on the perceived human-likeness of avatars and agents. Our results show that social touch is effective to further blur the boundary between computer- and human-controlled virtual characters and contributes to experiences that closely resemble human-to-human interactions.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

A Longitudinal Video Study on Communicating Status and Intent for Self-Driving Vehicle – Pedestrian Interaction
Stefanie M. Faas (Mercedes-Benz AG / Ulm University), Andrea C. Kao (Mercedes-Benz RD NA), Martin Baumann (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{FassLongitudinal,
internal-note = {NOTE(review): the citation key "FassLongitudinal" misspells the first author's surname (Faas, not Fass); key kept unchanged so existing \cite references still resolve},
title = {A Longitudinal Video Study on Communicating Status and Intent for Self-Driving Vehicle – Pedestrian Interaction},
author = {Stefanie M. Faas (Mercedes-Benz AG / Ulm University) and Andrea C. Kao (Mercedes-Benz RD NA) and Martin Baumann (Ulm University)},
doi = {10.1145/3313831.3376484},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {With self-driving vehicles (SDVs), pedestrians cannot rely on communication with the driver anymore. Industry experts and policymakers are proposing an external Human-Machine Interface (eHMI) communicating the automated status. We investigated whether additionally communicating SDVs’ intent to give right of way further improves pedestrians’ street crossing. To evaluate the stability of these eHMI effects, we conducted a three-session video study with N=34 pedestrians where we assessed subjective evaluations and crossing onset times. This is the first work capturing long-term effects of eHMIs. Our findings add credibility to prior studies by showing that eHMI effects last (acceptance, user experience) or even increase (crossing onset, perceived safety, trust, learnability, reliance) with time. We found that pedestrians benefit from an eHMI communicating SDVs’ status, and that additionally communicating SDVs’ intent adds further value. We conclude that SDVs should be equipped with an eHMI communicating both status and intent.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

A View on the Viewer: Gaze-Adaptive Captions for Videos
Kuno Kurzhals (ETH Zürich), Fabian Göbel (ETH Zürich), Katrin Angerbauer (University of Stuttgart), Michael Sedlmair (University of Stuttgart), Martin Raubal (ETH Zürich)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KurzhalsView,
  author    = {Kuno Kurzhals (ETH Zürich) and Fabian Göbel (ETH Zürich) and Katrin Angerbauer (University of Stuttgart) and Michael Sedlmair (University of Stuttgart) and Martin Raubal (ETH Zürich)},
  title     = {A View on the Viewer: Gaze-Adaptive Captions for Videos},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376266},
  abstract  = {Subtitles play a crucial role in cross-lingual distribution of multimedia content and help communicate information where auditory content is not feasible (loud environments, hearing impairments, unknown languages). Established methods utilize text at the bottom of the screen, which may distract from the video. Alternative techniques place captions closer to related content (e.g., faces) but are not applicable to arbitrary videos such as documentations. Hence, we propose to leverage live gaze as indirect input method to adapt captions to individual viewing behavior. We implemented two gaze-adaptive methods and compared them in a user study (n=54) to traditional captions and audio-only videos. The results show that viewers with less experience with captions prefer our gaze-adaptive methods as they assist them in reading. Furthermore, gaze distributions resulting from our methods are closer to natural viewing behavior compared to the traditional approach. Based on these results, we provide design implications for gaze-adaptive captions.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
AL: An Adaptive Learning Support System for Argumentation Skills
Thiemo Wambsganß (University of St. Gallen), Christina Niklaus (University of St. Gallen), Matthias Cetto (University of St. Gallen), Matthias Söllner (University of Kassel & University of St. Gallen), Siegfried Handschuh (University of St. Gallen & University of Passau), Jan Marco Leimeister (University of St. Gallen & Kassel University)
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{WambsganssAL,
  author    = {Thiemo Wambsganß (University of St. Gallen) and Christina Niklaus (University of St. Gallen) and Matthias Cetto (University of St. Gallen) and Matthias Söllner (University of Kassel \& University of St. Gallen) and Siegfried Handschuh (University of St. Gallen \& University of Passau) and Jan Marco Leimeister (University of St. Gallen \& Kassel University)},
  title     = {AL: An Adaptive Learning Support System for Argumentation Skills},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376851},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Assessing 2D and 3D Heatmaps for Comparative Analysis: An Empirical Study
Matthias Kraus (University of Konstanz), Katrin Angerbauer (University of Stuttgart), Juri Buchmüller (University of Konstanz), Daniel Schweitzer (University of Konstanz), Daniel Keim (University of Konstanz), Michael Sedlmair (University of Stuttgart), Johannes Fuchs (University of Konstanz)
Abstract | Tags: Full Paper | Links:
@inproceedings{KrausAssessing,
  author    = {Matthias Kraus (University of Konstanz) and Katrin Angerbauer (University of Stuttgart) and Juri Buchmüller (University of Konstanz) and Daniel Schweitzer (University of Konstanz) and Daniel Keim (University of Konstanz) and Michael Sedlmair (University of Stuttgart) and Johannes Fuchs (University of Konstanz)},
  title     = {Assessing 2D and 3D Heatmaps for Comparative Analysis: An Empirical Study},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376675},
  url       = {https://youtu.be/ybSj8ibu-qA, Video
https://www.twitter.com/dbvis, Twitter},
  abstract  = {Heatmaps are a popular visualization technique that encode 2D density distributions using color or brightness. Experimental studies have shown though that both of these visual variables are inaccurate when reading and comparing numeric data values. A potential remedy might be to use 3D heatmaps by introducing height as a third dimension to encode the data. Encoding abstract data in 3D, however, poses many problems, too. To better understand this tradeoff, we conducted an empirical study (N=48) to evaluate the user performance of 2D and 3D heatmaps for comparative analysis tasks. We test our conditions on a conventional 2D screen, but also in a virtual reality environment to allow for real stereoscopic vision. Our main results show that 3D heatmaps are superior in terms of error rate when reading and comparing single data items. However, for overview tasks, the well-established 2D heatmap performs better.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

Augmented Reality for Older Adults: Exploring Acceptability of Virtual Coaches for Home-based Balance Training in an Aging Population
Fariba Mostajeran (Uni Hamburg), Frank Steinicke (Uni Hamburg), Oscar Ariza (Uni Hamburg), Dimitrios Gatsios (University of Ioannina), Dimitrios Fotiadis (University of Ioannina)
Abstract | Tags: Full Paper | Links:
@inproceedings{MostajeranAugmented,
  author    = {Fariba Mostajeran (Uni Hamburg) and Frank Steinicke (Uni Hamburg) and Oscar Ariza (Uni Hamburg) and Dimitrios Gatsios (University of Ioannina) and Dimitrios Fotiadis (University of Ioannina)},
  title     = {Augmented Reality for Older Adults: Exploring Acceptability of Virtual Coaches for Home-based Balance Training in an Aging Population},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376565},
  abstract  = {Balance training has been shown to be effective in reducing risks of falling, which is a major concern for older adults. Usually, exercise programs are individually prescribed and monitored by physiotherapeutic or medical experts. Unfortunately, supervision and motivation of older adults during home-based exercises cannot be provided on a large scale, in particular, considering an ageing population. Augmented reality (AR) in combination with virtual coaches could provide a reasonable solution to this challenge. We present a first investigation of the acceptance of an AR coaching system for balance training, which can be performed at home. In a human-centered design approach we developed several mock-ups and prototypes, and evaluated them with 76 older adults. The results suggest that older adults find the system encouraging and stimulating. The virtual coach is perceived as an alive, calm, intelligent, and friendly human. However, usability of the entire AR system showed a significant negative correlation with participants' age.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Augmented Reality to Enable Users in Learning Case Grammar from Their Real-World Interactions
Fiona Draxler (LMU Munich), Audrey Labrie (Polytechnique Montréal), Albrecht Schmidt (LMU Munich), Lewis L. Chuang (LMU Munich)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{DraxlerAugmented,
  author    = {Fiona Draxler (LMU Munich) and Audrey Labrie (Polytechnique Montréal) and Albrecht Schmidt (LMU Munich) and Lewis L. Chuang (LMU Munich)},
  title     = {Augmented Reality to Enable Users in Learning Case Grammar from Their Real-World Interactions},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376537},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {Augmented Reality (AR) provides a unique opportunity to situate learning content in one's environment. In this work, we investigated how AR could be developed to provide an interactive context-based language learning experience. Specifically, we developed a novel handheld-AR app for learning case grammar by dynamically creating quizzes, based on real-life objects in the learner's surroundings. We compared this to the experience of learning with a non-contextual app that presented the same quizzes with static photographic images. Participants found AR suitable for use in their everyday lives and enjoyed the interactive experience of exploring grammatical relationships in their surroundings. Nonetheless, Bayesian tests provide substantial evidence that the interactive and context-embedded AR app did not improve case grammar skills, vocabulary retention, and usability over the experience with equivalent static images. Based on this, we propose how language learning apps could be designed to combine the benefits of contextual AR and traditional approaches.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

Augmented Reality Training for Industrial Assembly Work – Are Projection-based AR Assistive Systems an Appropriate Tool for Assembly Training?
Sebastian Büttner (TU Clausthal / TH OWL), Michael Prilla (TU Clausthal), Carsten Röcker (TH OWL)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{BuettnerAugmented,
  author    = {Sebastian Büttner (TU Clausthal / TH OWL) and Michael Prilla (TU Clausthal) and Carsten Röcker (TH OWL)},
  title     = {Augmented Reality Training for Industrial Assembly Work – Are Projection-based AR Assistive Systems an Appropriate Tool for Assembly Training?},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376720},
  url       = {https://www.twitter.com/HCISGroup, Twitter},
  abstract  = {Augmented Reality (AR) systems are on their way to industrial application, e.g. projection-based AR is used to enhance assembly work. Previous studies showed advantages of the systems in permanent-use scenarios, such as faster assembly times. In this paper, we investigate whether such systems are suitable for training purposes. Within an experiment, we observed the training with a projection-based AR system over multiple sessions and compared it with a personal training and a paper manual training. Our study shows that projection-based AR systems offer only small benefits in the training scenario. While a systematic mislearning of content is prevented through immediate feedback, our results show that the AR training does not reach the personal training in terms of speed and recall precision after 24 hours. Furthermore, we show that once an assembly task is properly trained, there are no differences in the long-term recall precision, regardless of the training method.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

Becoming a Robot – Overcoming Anthropomorphism with Techno-Mimesis
Judith Dörrenbächer (University of Siegen), Diana Löffler (University of Siegen), Marc Hassenzahl (University of Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{DoerrenbaecherBecoming,
  author    = {Judith Dörrenbächer (University of Siegen) and Diana Löffler (University of Siegen) and Marc Hassenzahl (University of Siegen)},
  title     = {Becoming a Robot – Overcoming Anthropomorphism with Techno-Mimesis},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376507},
  abstract  = {Employing anthropomorphism in physical appearance and behavior is the most widespread strategy for designing social robots. In the present paper, we argue that imitating humans impedes the full exploration of robots’ social abilities. In fact, their very ‘thingness’ (e.g., sensors, rationality) is able to create ‘superpowers’ that go beyond human abilities, such as endless patience. To better identify these special abilities, we develop a performative method called ‘Techno-Mimesis’ and explore it in a series of workshops with robot designers. Specifically, we create ‘prostheses’ to allow designers to transform themselves into their future robot to experience use cases from the robot’s perspective, e.g., ‘seeing’ with a distance sensor rather than with eyes. This imperfect imitation helps designers to experience being human and being robot at the same time, making differences apparent and facilitating the discovery of a number of potential physical, cognitive, and communicational robotic superpowers.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

Bot or not? User Perceptions of Player Substitution with Deep Player Behavior Models
Johannes Pfau (University of Bremen), Jan David Smeddinck (Newcastle University), Ioannis Bikas (University of Bremen), Rainer Malaka (University of Bremen)
Abstract | Tags: Full Paper | Links:
@inproceedings{PfauBot,
  author    = {Johannes Pfau (University of Bremen) and Jan David Smeddinck (Newcastle University) and Ioannis Bikas (University of Bremen) and Rainer Malaka (University of Bremen)},
  title     = {Bot or not? User Perceptions of Player Substitution with Deep Player Behavior Models},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376223},
  url       = {https://www.twitter.com/dmlabbremen, Twitter},
  abstract  = {Many online games suffer when players drop off due to lost connections or quitting prematurely, which leads to match terminations or game-play imbalances. While rule-based outcome evaluations or substitutions with bots are frequently used to mitigate such disruptions, these techniques are often perceived as unsatisfactory. Deep learning methods have successfully been used in deep player behavior modelling (DPBM) to produce non-player characters or bots which show more complex behavior patterns than those modelled using traditional AI techniques. Motivated by these findings, we present an investigation of the player-perceived awareness, believability and representativeness, when substituting disconnected players with DPBM agents in an online-multiplayer action game. Both quantitative and qualitative outcomes indicate that DPBM agents perform similarly to human players and that players were unable to detect substitutions. In contrast, players were able to detect substitution with agents driven by more traditional heuristics.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

BrainCoDe: Electroencephalography-based Comprehension Detection during Reading and Listening
Christina Schneegass (LMU Munich), Thomas Kosch (LMU Munich), Andrea Baumann (LMU Munich), Marius Rusu (LMU Munich), Mariam Hassib (Bundeswehr University Munich), Heinrich Hussmann (LMU Munich)
Abstract | Tags: Full Paper | Links:
@inproceedings{SchneegassBrainCode,
  author    = {Christina Schneegass (LMU Munich) and Thomas Kosch (LMU Munich) and Andrea Baumann (LMU Munich) and Marius Rusu (LMU Munich) and Mariam Hassib (Bundeswehr University Munich) and Heinrich Hussmann (LMU Munich)},
  title     = {BrainCoDe: Electroencephalography-based Comprehension Detection during Reading and Listening},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376707},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {The pervasive availability of media in foreign languages is a rich resource for language learning. However, learners are forced to interrupt media consumption whenever comprehension problems occur. We present BrainCoDe, a method to implicitly detect vocabulary gaps through the evaluation of event-related potentials (ERPs). In a user study (N=16), we evaluate BrainCoDe by investigating differences in ERP amplitudes during listening and reading of known words compared to unknown words. We found significant deviations in N400 amplitudes during reading and in N100 amplitudes during listening when encountering unknown words. To evaluate the feasibility of ERPs for real-time applications, we trained a classifier that detects vocabulary gaps with an accuracy of 87.13% for reading and 82.64% for listening, identifying eight out of ten words correctly as known or unknown. We show the potential of BrainCoDe to support media learning through instant translations or by generating personalized learning content.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}

Breaking The Experience: Effects of Questionnaires in VR User Studies
Susanne Putze (University of Bremen), Dmitry Alexandrovsky (University of Bremen), Felix Putze (University of Bremen), Sebastian Höffner (University of Bremen), Jan David Smeddinck (Newcastle University), Rainer Malaka (University of Bremen)
Abstract | Tags: Full Paper | Links:
@inproceedings{PutzeBreaking,
  author    = {Susanne Putze (University of Bremen) and Dmitry Alexandrovsky (University of Bremen) and Felix Putze (University of Bremen) and Sebastian Höffner (University of Bremen) and Jan David Smeddinck (Newcastle University) and Rainer Malaka (University of Bremen)},
  title     = {Breaking The Experience: Effects of Questionnaires in VR User Studies},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376144},
  url       = {https://www.youtube.com/watch?v=iHdW3nphCZQ, Video
https://www.twitter.com/dmlabbremen, Twitter},
  abstract  = {Questionnaires are among the most common research tools in virtual reality (VR) evaluations and user studies. However, transitioning from virtual worlds to the physical world to respond to VR experience questionnaires can potentially lead to systematic biases. Administering questionnaires in VR (inVRQs) is becoming more common in contemporary research. This is based on the intuitive notion that inVRQs may ease participation, reduce the Break in Presence (BIP) and avoid biases. In this paper, we perform a systematic investigation into the effects of interrupting the VR experience through questionnaires using physiological data as a continuous and objective measure of presence. In a user study (n=50), we evaluated question-asking procedures using a VR shooter with two different levels of immersion. The users rated their player experience with a questionnaire either inside or outside of VR. Our results indicate a reduced BIP for the employed inVRQ without affecting the self-reported player experience.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Capturing Experts' Mental Models to Organize a Collection of Haptic Devices: Affordances Outweigh Attributes
Hasti Seifi (Max Planck Institute for Intelligent Systems), Michael Oppermann (University of British Columbia), Julia Bullard (University of British Columbia), Karon MacLean (University of British Columbia), Katherine Kuchenbecker (Max Planck Institute for Intelligent Systems)
Tags: Full Paper | Links:
@inproceedings{SeifiCapturing,
  author    = {Hasti Seifi (Max Planck Institute for Intelligent Systems) and Michael Oppermann (University of British Columbia) and Julia Bullard (University of British Columbia) and Karon MacLean (University of British Columbia) and Katherine Kuchenbecker (Max Planck Institute for Intelligent Systems)},
  title     = {Capturing Experts' Mental Models to Organize a Collection of Haptic Devices: Affordances Outweigh Attributes},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376395},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Developing a Personality Model for Speech-based Conversational Agents Using the Psycholexical Approach
Sarah Theres Völkel (LMU Munich), Ramona Schödel (LMU Munich), Daniel Buschek (University of Bayreuth), Clemens Stachl (Stanford University), Verena Winterhalter (LMU Munich), Markus Bühner (LMU Munich), Heinrich Hussmann (LMU Munich)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{VoelkelDeveloping,
  author    = {Sarah Theres Völkel (LMU Munich) and Ramona Schödel (LMU Munich) and Daniel Buschek (University of Bayreuth) and Clemens Stachl (Stanford University) and Verena Winterhalter (LMU Munich) and Markus Bühner (LMU Munich) and Heinrich Hussmann (LMU Munich)},
  title     = {Developing a Personality Model for Speech-based Conversational Agents Using the Psycholexical Approach},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376210},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {We present the first systematic analysis of personality dimensions developed specifically to describe the personality of speech-based conversational agents. Following the psycholexical approach from psychology, we first report on a new multi-method approach to collect potentially descriptive adjectives from 1) a free description task in an online survey (228 unique descriptors), 2) an interaction task in the lab (176 unique descriptors), and 3) a text analysis of 30,000 online reviews of conversational agents (Alexa, Google Assistant, Cortana) (383 unique descriptors). We aggregate the results into a set of 349 adjectives, which are then rated by 744 people in an online survey. A factor analysis reveals that the commonly used Big Five model for human personality does not adequately describe agent personality. As an initial step to developing a personality model, we propose alternative dimensions and discuss implications for the design of agent personalities, personality-aware personalisation, and future research.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Dynamics of Aimed Mid-air Movements
Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
Tags: Full Paper | Links:
@inproceedings{BachynskyiDynamics,
  author    = {Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
  title     = {Dynamics of Aimed Mid-air Movements},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376194},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Enemy Within: Long-term Motivation Effects of Deep Player Behavior Models for Dynamic Difficulty Adjustment
Johannes Pfau (University of Bremen), Jan David Smeddinck (Newcastle University), Rainer Malaka (University of Bremen)
Abstract | Tags: Full Paper | Links:
@inproceedings{PfauEnemy,
  author    = {Johannes Pfau (University of Bremen) and Jan David Smeddinck (Newcastle University) and Rainer Malaka (University of Bremen)},
  title     = {Enemy Within: Long-term Motivation Effects of Deep Player Behavior Models for Dynamic Difficulty Adjustment},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376423},
  url       = {https://www.youtube.com/watch?v=QOdFmvQnPJQ, Video
https://www.twitter.com/dmlabbremen, Twitter},
  abstract  = {Balancing games and producing content that remains interesting and challenging is a major cost factor in the design and maintenance of games. Dynamic difficulty adjustment (DDA) can successfully tune challenge levels to player abilities, but when implemented with classic heuristic parameter tuning (HPT) often turns out to be very noticeable, e.g. as “rubber-banding”. Deep learning techniques can be employed for deep player behavior modeling (DPBM), enabling more complex adaptivity, but effects over frequent and longer-lasting game engagements, as well as comparisons to HPT have not been empirically investigated. We present a situated study of the effects of DDA via DPBM as compared to HPT on intrinsic motivation, perceived challenge and player motivation in a real-world MMORPG. The results indicate that DPBM can lead to significant improvements in intrinsic motivation and players prefer game experience episodes featuring DPBM over experience episodes with classic difficulty management.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Evaluation of a Financial Portfolio Visualization using Computer Displays and Mixed Reality Devices with Domain Experts
Kay Schroeder (Zuyd University of Applied Sciences), Batoul Ajdadilish (Zuyd University of Applied Sciences), Alexander P. Henkel (Zuyd University of Applied Sciences), André Calero Valdez (RWTH Aachen University)
Abstract | Tags: Full Paper | Links:
@inproceedings{SchroederEvaluation,
  author    = {Kay Schroeder (Zuyd University of Applied Sciences) and Batoul Ajdadilish (Zuyd University of Applied Sciences) and Alexander P. Henkel (Zuyd University of Applied Sciences) and André Calero Valdez (RWTH Aachen University)},
  title     = {Evaluation of a Financial Portfolio Visualization using Computer Displays and Mixed Reality Devices with Domain Experts},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376556},
  abstract  = {With the advent of mixed reality devices such as the Microsoft HoloLens, developers have been faced with the challenge to utilize the third dimension in information visualization effectively. Research on stereoscopic devices has shown that three-dimensional representation can improve accuracy in specific tasks (e.g., network visualization). Yet, so far the field has remained mute on the underlying mechanism. Our study systematically investigates the differences in user perception between a regular monitor and a mixed reality device. In a real-life within-subject experiment in the field with twenty-eight investment bankers, we assessed subjective and objective task performance with two- and three-dimensional systems, respectively. We tested accuracy with regard to position, size, and color using single and combined tasks. Our results do not show a significant difference in accuracy between mixed-reality and standard 2D monitor visualizations.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Examining Design Choices of Questionnaires in VR User Studies
Dmitry Alexandrovsky (University of Bremen), Susanne Putze (University of Bremen), Michael Bonfert (University of Bremen), Sebastian Höffner (University of Bremen), Pitt Michelmann (University of Bremen), Dirk Wenig (University of Bremen), Rainer Malaka (University of Bremen), Jan David Smeddinck (Newcastle University)
Abstract | Tags: Full Paper | Links:
@inproceedings{AlexandrovskyExamining,
  author    = {Dmitry Alexandrovsky (University of Bremen) and Susanne Putze (University of Bremen) and Michael Bonfert (University of Bremen) and Sebastian Höffner (University of Bremen) and Pitt Michelmann (University of Bremen) and Dirk Wenig (University of Bremen) and Rainer Malaka (University of Bremen) and Jan David Smeddinck (Newcastle University)},
  title     = {Examining Design Choices of Questionnaires in {VR} User Studies},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376260},
  url       = {https://www.youtube.com/watch?v=T32Sop_LFu0&feature=youtu.be, Video
https://www.twitter.com/dmlabbremen, Twitter},
  abstract  = {Questionnaires are among the most common research tools in virtual reality (VR) user studies. Transitioning from virtuality to reality for giving self-reports on VR experiences can lead to systematic biases. VR allows to embed questionnaires into the virtual environment which may ease participation and avoid biases. To provide a cohesive picture of methods and design choices for questionnaires in VR (inVRQ), we discuss 15 inVRQ studies from the literature and present a survey with 67 VR experts from academia and industry. Based on the outcomes, we conducted two user studies in which we tested different presentation and interaction methods of inVRQs and evaluated the usability and practicality of our design. We observed comparable completion times between inVRQs and questionnaires outside VR (outVRQs) with higher enjoyment but lower usability for inVRQs. These findings advocate the application of inVRQs and provide an overview of methods and considerations that lay the groundwork for inVRQ design.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Exploring Human-Robot Interaction with the Elderly: Results from a Ten-Week Case Study in a Care Home
Felix Carros (Uni Siegen), Johanna Meurer (Uni Siegen), Diana Löffler (Uni Siegen), David Unbehaun (Uni Siegen), Sarah Matthies (Uni Siegen), Inga Koch (Uni Siegen), Rainer Wieching (Uni Siegen), Dave Randall (Uni Siegen), Marc Hassenzahl (Uni Siegen), Volker Wulf (Uni Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{CarrosExploring,
  author    = {Felix Carros (Uni Siegen) and Johanna Meurer (Uni Siegen) and Diana Löffler (Uni Siegen) and David Unbehaun (Uni Siegen) and Sarah Matthies (Uni Siegen) and Inga Koch (Uni Siegen) and Rainer Wieching (Uni Siegen) and Dave Randall (Uni Siegen) and Marc Hassenzahl (Uni Siegen) and Volker Wulf (Uni Siegen)},
  title     = {Exploring Human-Robot Interaction with the Elderly: Results from a Ten-Week Case Study in a Care Home},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376402},
  abstract  = {We conducted an experiment to evaluate the LUI and our novel anchor-turning rotation control method regarding task performance, spatial cognition, VR sickness, sense of presence, usability and comfort in a path-integration task. The results show that VR Strider has a significant positive effect on the participants' angular and distance estimation, sense of presence and feeling of comfort compared to other established locomotion techniques, such as teleportation and joystick-based navigation.},
  internal-note = {NOTE(review): the abstract above describes a VR locomotion experiment ("VR Strider") and does not match this paper's title/topic (human-robot interaction in a care home) -- replace with the correct abstract from the ACM DL record for doi 10.1145/3313831.3376402. The date field read 2020-05-01 while sibling entries use 2020-04-26; normalized here, verify.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
FaceHaptics: Robot Arm based Versatile Facial Haptics for Immersive Environments
Alexander Wilberz (Hochschule Bonn-Rhein-Sieg), Dominik Leschtschow (Hochschule Bonn-Rhein-Sieg), Christina Trepkowski (Hochschule Bonn-Rhein-Sieg), Jens Maiero (Hochschule Bonn-Rhein-Sieg), Ernst Kruijff (Hochschule Bonn-Rhein-Sieg), Bernhard Riecke (Simon Fraser University)
Tags: Full Paper | Links:
@inproceedings{WilberzFaceHaptics,
  author    = {Alexander Wilberz (Hochschule Bonn-Rhein-Sieg) and Dominik Leschtschow (Hochschule Bonn-Rhein-Sieg) and Christina Trepkowski (Hochschule Bonn-Rhein-Sieg) and Jens Maiero (Hochschule Bonn-Rhein-Sieg) and Ernst Kruijff (Hochschule Bonn-Rhein-Sieg) and Bernhard Riecke (Simon Fraser University)},
  title     = {{FaceHaptics}: Robot Arm based Versatile Facial Haptics for Immersive Environments},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376481},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fairness and Decision-making in Collaborative Shift Scheduling Systems
Alarith Uhde (Uni Siegen), Nadine Schlicker (Ergosign GmbH), Dieter P. Wallach (Ergosign GmbH), Marc Hassenzahl (Uni Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{UhdeFairness,
  author    = {Alarith Uhde (Uni Siegen) and Nadine Schlicker (Ergosign GmbH) and Dieter P. Wallach (Ergosign GmbH) and Marc Hassenzahl (Uni Siegen)},
  title     = {Fairness and Decision-making in Collaborative Shift Scheduling Systems},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376656},
  abstract  = {The strains associated with shift work decrease healthcare workers' well-being. However, shift schedules adapted to their individual needs can partially mitigate these problems. From a computing perspective, shift scheduling was so far mainly treated as an optimization problem with little attention given to the preferences, thoughts, and feelings of the healthcare workers involved. In the present study, we explore fairness as a central, human-oriented attribute of shift schedules as well as the scheduling process. Three in-depth qualitative interviews and a validating vignette study revealed that while on an abstract level healthcare workers agree on equality as the guiding norm for a fair schedule, specific scheduling conflicts should foremost be resolved by negotiating the importance of individual needs. We discuss elements of organizational fairness, including transparency and team spirit. Finally, we present a sketch for fair scheduling systems, summarizing key findings for designers in a readily usable way.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feminist Living Labs as Research Infrastructures for HCI: The Case of a Video Game Company
Michael Ahmadi (University of Siegen), Rebecca Eilert (University of Siegen), Anne Weibert (University of Siegen), Volker Wulf (University of Siegen), Nicola Marsden (Heilbronn University)
Abstract | Tags: Full Paper | Links:
@inproceedings{AhmadiFeminist,
  author    = {Michael Ahmadi (University of Siegen) and Rebecca Eilert (University of Siegen) and Anne Weibert (University of Siegen) and Volker Wulf (University of Siegen) and Nicola Marsden (Heilbronn University)},
  title     = {Feminist Living Labs as Research Infrastructures for {HCI}: The Case of a Video Game Company},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376716},
  abstract  = {The number of women in IT is still low and companies struggle to integrate female professionals. The aim of our research is to provide methodological support for understanding and sharing experiences of gendered practices in the IT industry and encouraging sustained reflection about these matters over time. We established a Living Lab with that end in view, aiming to enhance female participation in the IT workforce and committing ourselves to a participatory approach to the sharing of women’s experiences. Here, using the case of a German video game company which participated in our Lab, we detail our lessons learned. We show that this kind of long-term participation involves challenges over the lifetime of the project but can lead to substantial benefits for organizations. Our findings demonstrate that Living Labs are suitable for giving voice to marginalized groups, addressing their concerns and evoking change possibilities. Nevertheless, uncertainties about long-term sustainability remain.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

GazeConduits: Calibration-Free Cross-Device Collaboration through Gaze and Touch
Simon Voelker (RWTH), Sebastian Hueber (RWTH), Christian Holz (ETH Zurich), Christian Remy (Aarhus University), Nicolai Marquardt (University College London)
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkerGaze,
  author    = {Simon Voelker (RWTH) and Sebastian Hueber (RWTH) and Christian Holz (ETH Zurich) and Christian Remy (Aarhus University) and Nicolai Marquardt (University College London)},
  title     = {{GazeConduits}: Calibration-Free Cross-Device Collaboration through Gaze and Touch},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376578},
  url       = {https://youtu.be/Q59SQi0JUkg, Video},
  abstract  = {We present GazeConduits, a calibration-free ad-hoc mobile device setup that enables users to collaboratively interact with tablets, other users, and content in a cross-device setting using gaze and touch input. GazeConduits leverages recently presented phone capabilities to detect facial features and estimate users’ gaze directions. To join a collaborative setting, users place one or more tablets onto a shared table and position their phone in the center, which then tracks present users as well as their gaze direction to predict the tablets they look at. Using GazeConduits, we demonstrate a series of techniques for collaborative interaction across mobile devices for content selection and manipulation. Our evaluation with 20 simultaneous tablets on a table showed that GazeConduits can reliably identify at which tablet or at which collaborator a user is looking, enabling a rich set of interaction techniques.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Getting out of Out of Sight: Evaluation of AR Mechanisms for Awareness and Orientation Support in Occluded Multi-Room Settings
Niklas Osmers (TU Clausthal), Michael Prilla (TU Clausthal)
Abstract | Tags: Full Paper | Links:
@inproceedings{OsmersGetting,
  author    = {Niklas Osmers (TU Clausthal) and Michael Prilla (TU Clausthal)},
  title     = {Getting out of Out of Sight: Evaluation of {AR} Mechanisms for Awareness and Orientation Support in Occluded Multi-Room Settings},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376742},
  url       = {https://www.twitter.com/HCISGroup, Twitter},
  abstract  = {Augmented Reality can provide orientation and awareness in situations in which objects or people are occluded by physical structures. This is relevant for many situations in the workplace, where objects are scattered across rooms and people are out of sight. While several AR mechanisms have been proposed to provide awareness and orientation in these situations, little is known about their effect on people's performance when searching objects and coordinating with each other. In this paper, we compare three AR based mechanisms (map, x-ray, compass) according to their utility, usability, social presence, task load and users’ preferences. 48 participants had to work together in groups of four to find people and objects located around different rooms. Results show that map and x-ray performed best but provided least social presence among participants. We discuss these and other observations as well as potential impacts on designing AR awareness and orientation support.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Guess the Data: Data Work to Understand How People Make Sense of and Use Simple Sensor Data from Homes
Albrecht Kurze (Chemnitz University of Technology), Andreas Bischof (Chemnitz University of Technology), Sören Totzauer (Chemnitz University of Technology), Michael Storz (Chemnitz University of Technology), Maximilian Eibl (Chemnitz University of Technology), Margot Brereton (Queensland University of Technology), Arne Berger (Anhalt University of Applied Sciences)
Abstract | Tags: Full Paper | Links:
@inproceedings{KurzeGuess,
  author    = {Albrecht Kurze (Chemnitz University of Technology) and Andreas Bischof (Chemnitz University of Technology) and Sören Totzauer (Chemnitz University of Technology) and Michael Storz (Chemnitz University of Technology) and Maximilian Eibl (Chemnitz University of Technology) and Margot Brereton (Queensland University of Technology) and Arne Berger (Anhalt University of Applied Sciences)},
  title     = {Guess the Data: Data Work to Understand How People Make Sense of and Use Simple Sensor Data from Homes},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376273},
  url       = {https://www.twitter.com/arneberger, Twitter},
  abstract  = {Simple smart home sensors, e.g. for temperature or light, increasingly collect seemingly inconspicuous data. Prior work has shown that human sensemaking of such sensor data can reveal domestic activities. Such sensemaking presents an opportunity to empower people to understand the implications of simple smart home sensors. To investigate, we developed and field-tested the Guess the Data method, which enabled people to use and make sense of live data from their homes and to collectively interpret and reflect on anonymized data from the homes in our study. Our findings show how participants reconstruct behavior, both individually and collectively, expose the sensitive personal data of others, and use sensor data as evidence and for lateral surveillance within the household. We discuss the potential of our method as a participatory HCI method for investigating design of the IoT and implications created by doing data work on home sensors.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

HeadReach: Using Head Tracking to Increase Reachability on Mobile Touch Devices
Simon Voelker (RWTH), Sebastian Hueber (RWTH), Christian Corsten (RWTH), Christian Remy (Aarhus University)
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkerHeadReach,
  author    = {Simon Voelker (RWTH) and Sebastian Hueber (RWTH) and Christian Corsten (RWTH) and Christian Remy (Aarhus University)},
  title     = {{HeadReach}: Using Head Tracking to Increase Reachability on Mobile Touch Devices},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376868},
  url       = {https://youtu.be/IyVp5VFde2w, Video},
  abstract  = {People often operate their smartphones with only one hand, using just their thumb for touch input. With today’s larger smartphones, this leads to a reachability issue: Users can no longer comfortably touch everywhere on the screen without changing their grip. We investigate using the head tracking in modern smartphones to address this reachability issue. We developed three interaction techniques, pure head (PH), head+ touch (HT), and head area + touch (HA), to select targets beyond the reach of one’s thumb. In two user studies, we found that selecting targets using HT and HA had higher success rates than the default direct touch (DT) while standing (by about 9%) and walking (by about 12%), while being moderately slower. HT and HA were also faster than one of the best techniques, BezelCursor (BC) (by about 20% while standing and 6% while walking), while having the same success rate.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Heartbeats in the Wild: A Field Study Exploring ECG Biometrics in Everyday Life
Florian Lehmann (LMU Munich / University of Bayreuth), Daniel Buschek (University of Bayreuth)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{LehmannHeartbeats,
  author    = {Florian Lehmann (LMU Munich / University of Bayreuth) and Daniel Buschek (University of Bayreuth)},
  title     = {Heartbeats in the Wild: A Field Study Exploring {ECG} Biometrics in Everyday Life},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376536},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {This paper reports on an in-depth study of electrocardiogram (ECG) biometrics in everyday life. We collected ECG data from 20 people over a week, using a non-medical chest tracker. We evaluated user identification accuracy in several scenarios and observed equal error rates of 9.15% to 21.91%, heavily depending on 1) the number of days used for training, and 2) the number of heartbeats used per identification decision. We conclude that ECG biometrics can work in the wild but are less robust than expected based on the literature, highlighting that previous lab studies obtained highly optimistic results with regard to real life deployments. We explain this with noise due to changing body postures and states as well as interrupted measures. We conclude with implications for future research and the design of ECG biometrics systems for real world deployments, including critical reflections on privacy.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Heatmaps, Shadows, Bubbles, Rays: Comparing Mid-Air Pen Position Visualizations in Handheld AR
Philipp Wacker (RWTH), Adrian Wagner (RWTH), Simon Voelker (RWTH), Jan Borchers (RWTH)
Abstract | Tags: Full Paper | Links:
@inproceedings{WackerHeatmaps,
  author    = {Philipp Wacker (RWTH) and Adrian Wagner (RWTH) and Simon Voelker (RWTH) and Jan Borchers (RWTH)},
  title     = {Heatmaps, Shadows, Bubbles, Rays: Comparing Mid-Air Pen Position Visualizations in Handheld {AR}},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376848},
  url       = {https://youtu.be/sFPP2xeAEP8, Video},
  abstract  = {In Handheld Augmented Reality, users look at AR scenes through the smartphone held in their hand. In this setting, having a mid-air pointing device like a pen in the other hand greatly expands the interaction possibilities. For example, it lets users create 3D sketches and models while on the go. However, perceptual issues in Handheld AR make it difficult to judge the distance of a virtual object, making it hard to align a pen to it. To address this, we designed and compared different visualizations of the pen's position in its virtual environment, measuring pointing precision, task time, activation patterns, and subjective ratings of helpfulness, confidence, and comprehensibility of each visualization. While all visualizations resulted in only minor differences in precision and task time, subjective ratings of perceived helpfulness and confidence favor a `heatmap' technique that colors the objects in the scene based on their distance to the pen.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

HiveFive: Immersion Preserving Attention Guidance in Virtual Reality
Daniel Lange (University of Oldenburg), Tim Claudius Stratmann (OFFIS - Institute for IT), Uwe Gruenefeld (OFFIS - Institute for IT), Susanne Boll (University of Oldenburg)
Abstract | Tags: Full Paper | Links:
@inproceedings{LangeHiveFive,
  author    = {Daniel Lange (University of Oldenburg) and Tim Claudius Stratmann (OFFIS - Institute for IT) and Uwe Gruenefeld (OFFIS - Institute for IT) and Susanne Boll (University of Oldenburg)},
  title     = {{HiveFive}: Immersion Preserving Attention Guidance in Virtual Reality},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376803},
  url       = {https://youtu.be/df_onXBj7cM, Video},
  abstract  = {Recent advances in Virtual Reality (VR) technology, such as larger fields of view, have made VR increasingly immersive. However, a larger field of view often results in a user focusing on certain directions and missing relevant content presented elsewhere on the screen. With HiveFive, we propose a technique that uses swarm motion to guide user attention in VR. The goal is to seamlessly integrate directional cues into the scene without losing immersiveness. We evaluate HiveFive in two studies. First, we compare biological motion (from a prerecorded swarm) with non-biological motion (from an algorithm), finding further evidence that humans can distinguish between these motion types and that, contrary to our hypothesis, non-biological swarm motion results in significantly faster response times. Second, we compare HiveFive to four other techniques and show that it not only results in fast response times but also has the smallest negative effect on immersion.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

How to Trick AI: Users’ Strategies for Protecting Themselves From Automatic Personality Assessment
Sarah Theres Völkel (LMU Munich), Renate Häuslschmid (Madeira Interactive Technologies Institute), Anna Werner (LMU Munich), Heinrich Hussmann (LMU Munich), Andreas Butz (LMU Munich)
Abstract | Tags: Full Paper | Links:
@inproceedings{VoelkelHow,
  author    = {Sarah Theres Völkel (LMU Munich) and Renate Häuslschmid (Madeira Interactive Technologies Institute) and Anna Werner (LMU Munich) and Heinrich Hussmann (LMU Munich) and Andreas Butz (LMU Munich)},
  title     = {How to Trick {AI}: Users’ Strategies for Protecting Themselves From Automatic Personality Assessment},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376877},
  url       = {https://www.twitter.com/mimuc, Twitter},
  abstract  = {Psychological targeting tries to influence and manipulate users' behaviour. We investigated whether users can protect themselves from being profiled by a chatbot, which automatically assesses users' personality. Participants interacted twice with the chatbot: (1) They chatted for 45 minutes in customer service scenarios and received their actual profile (baseline). (2) They then were asked to repeat the interaction and to disguise their personality by strategically tricking the chatbot into calculating a falsified profile. In interviews, participants mentioned 41 different strategies but could only apply a subset of them in the interaction. They were able to manipulate all Big Five personality dimensions by nearly 10%. Participants regarded personality as very sensitive data. As they found tricking the AI too exhaustive for everyday use, we reflect on opportunities for privacy protective designs in the context of personality-aware systems.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Improving Humans' Ability to Interpret Deictic Gestures in Virtual Reality
Sven Mayer (Carnegie Mellon University / University of Stuttgart), Jens Reinhardt (Hamburg University of Applied Sciences), Robin Schweigert (University of Stuttgart), Brighten Jelke (Macalester College), Valentin Schwind (University of Stuttgart / University of Regensburg), Katrin Wolf (Hamburg University of Applied Sciences), Niels Henze (University of Regensburg)
Abstract | Tags: Full Paper | Links:
@inproceedings{MayerImproving,
  author    = {Sven Mayer (Carnegie Mellon University / University of Stuttgart) and Jens Reinhardt (Hamburg University of Applied Sciences) and Robin Schweigert (University of Stuttgart) and Brighten Jelke (Macalester College) and Valentin Schwind (University of Stuttgart / University of Regensburg) and Katrin Wolf (Hamburg University of Applied Sciences) and Niels Henze (University of Regensburg)},
  title     = {Improving Humans' Ability to Interpret Deictic Gestures in Virtual Reality},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376340},
  url       = {https://www.youtube.com/watch?v=Afi4TPzHdlM, Video},
  abstract  = {Collaborative Virtual Environments (CVEs) offer unique opportunities for human communication. Humans can interact with each other over a distance in any environment and visual embodiment they want. Although deictic gestures are especially important as they can guide other humans' attention, humans make systematic errors when using and interpreting them. Recent work suggests that the interpretation of vertical deictic gestures can be significantly improved by warping the pointing arm. In this paper, we extend previous work by showing that models enable to also improve the interpretation of deictic gestures at targets all around the user. Through a study with 28 participants in a CVE, we analyzed the errors users make when interpreting deictic gestures. We derived a model that rotates the arm of a pointing user's avatar to improve the observing users' accuracy. A second study with 24 participants shows that we can improve observers' accuracy by 22.9%. As our approach is not noticeable for users, it improves their accuracy without requiring them to learn a new interaction technique or distracting from the experience.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Improving the Usability and UX of the Swiss Internet Voting Interface
Karola Marky (TU Darmstadt), Verena Zimmermann (TU Darmstadt), Markus Funk (Cerence GmbH), Jörg Daubert (TU Darmstadt), Kira Bleck (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
Abstract | Tags: Full Paper | Links:
@inproceedings{MarkyImproving,
  author    = {Karola Marky (TU Darmstadt) and Verena Zimmermann (TU Darmstadt) and Markus Funk (Cerence GmbH) and Jörg Daubert (TU Darmstadt) and Kira Bleck (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
  title     = {Improving the Usability and {UX} of the {Swiss} Internet Voting Interface},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376769},
  url       = {https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
  abstract  = {Up to 20% of residential votes and up to 70% of absentee votes in Switzerland are cast online. The Swiss scheme aims to provide individual verifiability by different verification codes. The voters have to carry out verification on their own, making the usability and UX of the interface of great importance. To improve the usability, we first performed an evaluation with 12 human-computer interaction experts to uncover usability weaknesses of the Swiss Internet voting interface. Based on the experts' findings, related work, and an exploratory user study with 36 participants, we propose a redesign that we evaluated in a user study with 49 participants. Our study confirmed that the redesign indeed improves the detection of incorrect votes by 33% and increases the trust and understanding of the voters. Our studies furthermore contribute important recommendations for designing verifiable e-voting systems in general.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Improving Worker Engagement Through Conversational Microtask Crowdsourcing
Sihang Qiu (Delft University of Technology), Ujwal Gadiraju (Leibniz Universität Hannover), Alessandro Bozzon (Delft University of Technology)
Tags: Full Paper | Links:
@inproceedings{QiuImproving,
  title     = {Improving Worker Engagement Through Conversational Microtask Crowdsourcing},
  author    = {Sihang Qiu (Delft University of Technology) and Ujwal Gadiraju (Leibniz Universität Hannover) and Alessandro Bozzon (Delft University of Technology)},
  doi       = {10.1145/3313831.3376403},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
In-game and Out-of-game Social Anxiety Influences Player Motivations, Activities, and Experiences in MMORPGs
Martin Dechant (University of Saskatchewan), Susanne Poeller (University of Trier), Colby Johanson (University of Saskatchewan), Katelyn Wiley (University of Saskatchewan), Regan Mandryk (University of Saskatchewan)
Tags: Full Paper | Links:
@inproceedings{DechantInOut,
  title     = {In-game and Out-of-game Social Anxiety Influences Player Motivations, Activities, and Experiences in MMORPGs},
  author    = {Martin Dechant (University of Saskatchewan) and Susanne Poeller (University of Trier) and Colby Johanson (University of Saskatchewan) and Katelyn Wiley (University of Saskatchewan) and Regan Mandryk (University of Saskatchewan)},
  doi       = {10.1145/3313831.3376734},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Interaction Techniques for Visual Exploration Using Embedded Word-Scale Visualizations
Pascal Goffin (University of Utah), Tanja Blascheck (University of Stuttgart), Petra Isenberg (Inria), Wesley Willett (University of Calgary)
Abstract | Tags: Full Paper | Links:
@inproceedings{GoffinInteraction,
  title     = {Interaction Techniques for Visual Exploration Using Embedded Word-Scale Visualizations},
  author    = {Pascal Goffin (University of Utah) and Tanja Blascheck (University of Stuttgart) and Petra Isenberg (Inria) and Wesley Willett (University of Calgary)},
  doi       = {10.1145/3313831.3376842},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {We describe a design space of view manipulation interactions for small data-driven contextual visualizations (word-scale visualizations). These interaction techniques support an active reading experience and engage readers through exploration of embedded visualizations whose placement and content connect them to specific terms in a document. A reader could, for example, use our proposed interaction techniques to explore word-scale visualizations of stock market trends for companies listed in a market overview article. When readers wish to engage more deeply with the data, they can collect, arrange, compare, and navigate the document using the embedded word-scale visualizations, permitting more visualization-centric analyses. We support our design space with a concrete implementation, illustrate it with examples from three application domains, and report results from two experiments. The experiments show how view manipulation interactions helped readers examine embedded visualizations more quickly and with less scrolling and yielded qualitative feedback on usability and future opportunities.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Investigating User-Created Gamification in an Image Tagging Task
Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus), Dominic Buchheit (Saarland University), Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)
Abstract | Tags: Full Paper | Links:
@inproceedings{SchubhanInvestigating,
  title     = {Investigating User-Created Gamification in an Image Tagging Task},
  author    = {Marc Schubhan (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Maximilian Altmeyer (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus) and Dominic Buchheit (Saarland University) and Pascal Lessel (German Research Center for Artificial Intelligence (DFKI), Saarland Informatics Campus)},
  url       = {https://www.youtube.com/watch?v=C_2RE_Tfzys, Video},
  doi       = {10.1145/3313831.3376360},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Commonly, gamification is designed by developers and not by end-users. In this paper we investigate an approach where users take control of this process. Firstly, users were asked to describe their own gamification concepts which would motivate them to put more effort into an image tagging task. We selected this task as gamification has already been shown to be effective here in previous work. Based on these descriptions, an implementation was made for each concept and given to the creator. In a between-subjects study (n=71), our approach was compared to a no-gamification condition and two conditions with fixed gamification settings. We found that providing participants with an implementation of their own concept significantly increased the amount of generated tags compared to the other conditions. Although the quality of tags was lower, the number of usable tags remained significantly higher in comparison, suggesting the usefulness of this approach.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
JumpVR: Jump-Based Locomotion Augmentation for Virtual Reality
Dennis Wolf (Ulm University), Katja Rogers (Ulm University), Christoph Kunder (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{WolfJump,
  title     = {JumpVR: Jump-Based Locomotion Augmentation for Virtual Reality},
  author    = {Dennis Wolf (Ulm University) and Katja Rogers (Ulm University) and Christoph Kunder (Ulm University) and Enrico Rukzio (Ulm University)},
  url       = {https://youtu.be/JNWfs3-V1zQ, Video
               https://www.twitter.com/mi_uulm, Twitter},
  doi       = {10.1145/3313831.3376243},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {One of the great benefits of virtual reality (VR) is the implementation of features that go beyond realism. Common “unrealistic” locomotion techniques (like teleportation) can avoid spatial limitation of tracking but minimize potential benefits of more realistic techniques (e.g., walking). As an alternative that combines realistic physical movement with hyper-realistic virtual outcome, we present JumpVR, a jump-based locomotion augmentation technique that virtually scales users’ physical jumps. In a user study (N=28), we show that jumping in VR (regardless of scaling) can significantly increase presence, motivation and immersion compared to teleportation, while largely not increasing simulator sickness. Further, participants reported higher immersion and motivation for most scaled jumping variants than forward-jumping. Our work shows the feasibility and benefits of jumping in VR and explores suitable parameters for its hyper-realistic scaling. We discuss design implications for VR experiences and research.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Leveraging Error Correction in Voice-based Text Entry by Talk-and-Gaze
Korok Sengupta (University of Koblenz), Sabin Bhattarai (University of Koblenz), Sayan Sarcar (University of Tsukuba), Scott MacKenzie (York University), Steffen Staab (University of Stuttgart)
Abstract | Tags: Full Paper | Links:
@inproceedings{SenguptaLeveraging,
  title     = {Leveraging Error Correction in Voice-based Text Entry by Talk-and-Gaze},
  author    = {Korok Sengupta (University of Koblenz) and Sabin Bhattarai (University of Koblenz) and Sayan Sarcar (University of Tsukuba) and Scott MacKenzie (York University) and Steffen Staab (University of Stuttgart)},
  url       = {https://www.twitter.com/AnalyticComp, Twitter},
  doi       = {10.1145/3313831.3376579},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {We present the design and evaluation of Talk-and-Gaze (TaG), a method for selecting and correcting errors with voice and gaze. TaG uses eye gaze to overcome the inability of voice-only systems to provide spatial information. The user’s point of gaze is used to select an erroneous word either by dwelling on the word for 800 ms (D-TaG) or by uttering a “select” voice command (V-TaG). A user study with 12 participants compared D-TaG, V-TaG, and a voice-only method for selecting and correcting words. Corrections were performed more than 20% faster with D-TaG compared to the V-TaG or voice-only methods. As well, D-TaG was observed to require 24% less selection effort than V-TaG and 11% less selection effort than voice-only error correction. D-TaG was well received in a subjective assessment with 66% of users choosing it as their preferred choice for error correction in voice-based text entry.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Levitation Simulator: Prototyping Ultrasonic Levitation Interfaces in Virtual Reality
Viktorija Paneva (University of Bayreuth), Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{PanevaLevitation,
  title     = {Levitation Simulator: Prototyping Ultrasonic Levitation Interfaces in Virtual Reality},
  author    = {Viktorija Paneva (University of Bayreuth) and Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
  doi       = {10.1145/3313831.3376409},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Listen to Developers! A Participatory Design Study on Security Warnings for Cryptographic APIs
Peter Gorski (TH Köln / University of Applied Sciences), Yasemin Acar (Leibniz University Hannover), Luigi Lo Iacono (TH Köln / University of Applied Sciences), Sascha Fahl (Leibniz University Hannover)
Tags: Full Paper | Links:
@inproceedings{GorskiListen,
  title     = {Listen to Developers! A Participatory Design Study on Security Warnings for Cryptographic APIs},
  author    = {Peter Gorski (TH Köln / University of Applied Sciences) and Yasemin Acar (Leibniz University Hannover) and Luigi Lo Iacono (TH Köln / University of Applied Sciences) and Sascha Fahl (Leibniz University Hannover)},
  doi       = {10.1145/3313831.3376142},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Meaningful Technology at Work – A Reflective Design Case of Improving Radiologists’ Wellbeing Through Medical Technology
Matthias Laschke (Uni Siegen), Christoph Braun (Siemens Healthineers), Robin Neuhaus (Uni Siegen), Marc Hassenzahl (Uni Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{LaschkeMeaningful,
  title     = {Meaningful Technology at Work – A Reflective Design Case of Improving Radiologists’ Wellbeing Through Medical Technology},
  author    = {Matthias Laschke (Uni Siegen) and Christoph Braun (Siemens Healthineers) and Robin Neuhaus (Uni Siegen) and Marc Hassenzahl (Uni Siegen)},
  doi       = {10.1145/3313831.3376710},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {The present paper presents a real-world case with a large medical technology provider, showing that medical technology could be designed more holistically to improve radiologists' wellbeing explicitly. Despite all skepticism, our prototypical applications resonated well among the radiologists involved, the healthcare provider, and other customers of the MTP.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mix&Match: Towards Omitting Modelling through In-Situ Alteration and Remixing of Model Repository Artifacts in Mixed Reality
Evgeny Stemasov (Ulm University), Tobias Wagner (Ulm University), Jan Gugenheimer (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{StemasovMix,
  title     = {Mix&Match: Towards Omitting Modelling through In-Situ Alteration and Remixing of Model Repository Artifacts in Mixed Reality},
  author    = {Evgeny Stemasov (Ulm University) and Tobias Wagner (Ulm University) and Jan Gugenheimer (Ulm University) and Enrico Rukzio (Ulm University)},
  url       = {https://youtu.be/Dyb0QRtNtag, Video
               https://www.twitter.com/mi_uulm, Twitter},
  doi       = {10.1145/3313831.3376839},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {The accessibility of tools to model artifacts is one of the core driving factors for the adoption of Personal Fabrication. Subsequently, model repositories like Thingiverse became important tools in (novice) makers' processes. They allow them to shorten or even omit the design process, offloading a majority of the effort to other parties. However, steps like measurement of surrounding constraints (e.g., clearance) which exist only inside the users' environment, can not be similarly outsourced. We propose Mix&Match, a mixed-reality-based system which allows users to browse model repositories, preview the models in-situ, and adapt them to their environment in a simple and immediate fashion. Mix&Match aims to provide users with CSG operations which can be based on both virtual and real geometry. We present interaction patterns and scenarios for Mix&Match, arguing for the combination of mixed reality and model repositories. This enables almost modelling-free personal fabrication for both novices and expert makers.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Next Steps in Human-Computer Integration
Florian 'Floyd' Mueller (Monash University), Pedro Lopes (University of Chicago), Paul Strohmeier (University of Copenhagen / Saarland University), Wendy Ju (Cornell Tech), Caitlyn Seim (Stanford University), Martin Weigel (Honda Research Institute Europe), Suranga Nanayakkara (University of Auckland), Marianna Obrist (University of Essex), Zhuying Li (Monash University), Joseph Delfa (Monash University), Jun Nishida (University of Chicago), Elizabeth M. Gerber (Northwestern University), Dag Svanaes (NTNU / IT University of Copenhagen), Jonathan Grudin (Microsoft), Stefan Greuter (Deakin University), Kai Kunze (Keio University), Thomas Erickson (Independent researcher), Steven Greenspan (CA Technologies), Masahiko Inami (University of Tokyo), Joe Marshall (University of Nottingham), Harald Reiterer (University of Konstanz), Katrin Wolf (Beuth University of Applied Sciences Berlin), Jochen Meyer (OFFIS), Thecla Schiphorst (Simon Fraser University), Dakuo Wang (IBM Research), Pattie Maes (MIT Media Lab)
Abstract | Tags: Full Paper | Links:
@inproceedings{MuellerIntegration,
  title     = {Next Steps in Human-Computer Integration},
  author    = {Florian 'Floyd' Mueller (Monash University) and Pedro Lopes (University of Chicago) and Paul Strohmeier (University of Copenhagen / Saarland University) and Wendy Ju (Cornell Tech) and Caitlyn Seim (Stanford University) and Martin Weigel (Honda Research Institute Europe) and Suranga Nanayakkara (University of Auckland) and Marianna Obrist (University of Essex) and Zhuying Li (Monash University) and Joseph Delfa (Monash University) and Jun Nishida (University of Chicago) and Elizabeth M. Gerber (Northwestern University) and Dag Svanaes (NTNU / IT University of Copenhagen) and Jonathan Grudin (Microsoft) and Stefan Greuter (Deakin University) and Kai Kunze (Keio University) and Thomas Erickson (Independent researcher) and Steven Greenspan (CA Technologies) and Masahiko Inami (University of Tokyo) and Joe Marshall (University of Nottingham) and Harald Reiterer (University of Konstanz) and Katrin Wolf (Beuth University of Applied Sciences Berlin) and Jochen Meyer (OFFIS) and Thecla Schiphorst (Simon Fraser University) and Dakuo Wang (IBM Research) and Pattie Maes (MIT Media Lab)},
  doi       = {10.1145/3313831.3376242},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Human-Computer Integration (HInt) is an emerging paradigm in which computational and human systems are closely interwoven. Integrating computers with the human body is not new. However, we believe that with rapid technological advancements, increasing real-world deployments, and growing ethical and societal implications, it is critical to identify an agenda for future research. We present a set of challenges for HInt research, formulated over the course of a five-day workshop consisting of 29 experts who have designed, deployed, and studied HInt systems. This agenda aims to guide researchers in a structured way towards a more coordinated and conscientious future of human-computer integration.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
NurseCare: Design and ‘In-The-Wild’ Evaluation of a Mobile System to Promote the Ergonomic Transfer of Patients
Maximilian Dürr (University of Konstanz), Carla Gröschel (University of Konstanz), Ulrike Pfeil (University of Konstanz), Harald Reiterer (University of Konstanz)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{DuerrNurseCare,
  title       = {NurseCare: Design and ‘In-The-Wild’ Evaluation of a Mobile System to Promote the Ergonomic Transfer of Patients},
  author      = {Maximilian Dürr (University of Konstanz) and Carla Gröschel (University of Konstanz) and Ulrike Pfeil (University of Konstanz) and Harald Reiterer (University of Konstanz)},
  url         = {https://youtu.be/BJaKsSOjW4k, Video
                 https://www.twitter.com/HCIGroupKN, Twitter},
  doi         = {10.1145/3313831.3376851},
  year        = {2020},
  date        = {2020-04-26},
  urldate     = {2020-04-07},
  booktitle   = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher   = {ACM},
  institution = {University of Konstanz},
  abstract    = {Nurses are frequently required to transfer patients as part of their daily duties. However, the manual transfer of patients is a major risk factor for injuries to the back. Although the Kinaesthetics Care Conception can help to address this issue, existing support for the integration of the concept into nursing-care practice is low. We present NurseCare, a mobile system that aims to promote the practical application of ergonomic patient transfers based on the Kinaesthetics Care Conception. NurseCare consists of a wearable and a smartphone app. Key features of NurseCare include mobile accessible instructions for ergonomic patient transfers, in-situ feedback for the risky bending of the back, and long-term feedback. We evaluated NurseCare in a nine participant ‘in-the-wild’ evaluation. Results indicate that NurseCare can facilitate ergonomic work while providing a high user experience adequate to the nurses’ work domain, and reveal how NurseCare can be incorporated in given practices.},
  type        = {Full Paper},
  keywords    = {Full Paper, Honorable Mention},
  pubstate    = {published},
  tppubtype   = {inproceedings}
}

On Conducting Security Developer Studies with CS Students: Examining a Password-Storage Study with CS Students, Freelancers, and Company Developers
Alena Naiakshina (University of Bonn), Anastasia Danilova (University of Bonn), Eva Gerlitz (Fraunhofer FKIE), Matthew Smith (University of Bonn / Fraunhofer FKIE)
Tags: Full Paper | Links:
@inproceedings{NaiakshinaConducting,
  title     = {On Conducting Security Developer Studies with CS Students: Examining a Password-Storage Study with CS Students, Freelancers, and Company Developers},
  author    = {Alena Naiakshina (University of Bonn) and Anastasia Danilova (University of Bonn) and Eva Gerlitz (Fraunhofer FKIE) and Matthew Smith (University of Bonn / Fraunhofer FKIE)},
  doi       = {10.1145/3313831.3376791},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
One does not Simply RSVP: Mental Workload to Select Speed Reading Parameters using Electroencephalography
Thomas Kosch (LMU Munich), Albrecht Schmidt (LMU Munich), Simon Thanheiser (LMU Munich), Lewis L. Chuang (LMU Munich)
Abstract | Tags: Full Paper | Links:
@inproceedings{KoschRSVP,
  title     = {One does not Simply RSVP: Mental Workload to Select Speed Reading Parameters using Electroencephalography},
  author    = {Thomas Kosch (LMU Munich) and Albrecht Schmidt (LMU Munich) and Simon Thanheiser (LMU Munich) and Lewis L. Chuang (LMU Munich)},
  url       = {https://www.twitter.com/mimuc, Twitter},
  doi       = {10.1145/3313831.3376766},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Rapid Serial Visual Presentation (RSVP) has gained popularity as a method for presenting text on wearable devices with limited screen space. Nonetheless, it remains unclear how to calibrate RSVP display parameters, such as spatial alignments or presentation rates, to suit the reader’s information processing ability at high presentation speeds. Existing methods rely on comprehension and subjective workload scores, which are influenced by the user’s knowledge base and subjective perception. Here, we use electroencephalography (EEG) to directly determine how individual information processing varies with changes in RSVP display parameters. Eighteen participants read text excerpts with RSVP in a repeated-measures design that manipulated the Text Alignment and Presentation Speed of text representation. We evaluated how predictive EEG metrics were of gains in reading speed, subjective workload, and text comprehension. We found significant correlations between EEG and increasing Presentation Speeds and propose how EEG can be used for dynamic selection of RSVP parameters.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

ORCSolver: An Efficient Solver for Adaptive GUI Layout with OR-Constraints
Yue Jiang (University of Maryland / MPI / Saarland University), Wolfgang Stuerzlinger (Simon Fraser University), Matthias Zwicker (University of Maryland), Christof Lutteroth (University of Bath)
Tags: Full Paper | Links:
@inproceedings{JiangORCSolver,
  title     = {ORCSolver: An Efficient Solver for Adaptive GUI Layout with OR-Constraints},
  author    = {Yue Jiang (University of Maryland / MPI / Saarland University) and Wolfgang Stuerzlinger (Simon Fraser University) and Matthias Zwicker (University of Maryland) and Christof Lutteroth (University of Bath)},
  doi       = {10.1145/3313831.3376610},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Outline Pursuits: Gaze-assisted Selection of Occluded Objects in Virtual Reality
Ludwig Sidenmark (Lancaster University), Christopher Clarke (Lancaster University), Xuesong Zhang (Katholieke Universiteit Leuven), Jenny Phu (Ludwig Maximilian University of Munich), Hans Gellersen (Aarhus University)
Tags: Full Paper | Links:
@inproceedings{SidenmarkOutline,
  title     = {Outline Pursuits: Gaze-assisted Selection of Occluded Objects in Virtual Reality},
  author    = {Ludwig Sidenmark (Lancaster University) and Christopher Clarke (Lancaster University) and Xuesong Zhang (Katholieke Universiteit Leuven) and Jenny Phu (Ludwig Maximilian University of Munich) and Hans Gellersen (Aarhus University)},
  doi       = {10.1145/3313831.3376438},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Performance and Experience of Throwing in Virtual Reality
Tim Zindulka (University of Bayreuth), Myroslav Bachynskyi (University of Bayreuth), Jörg Müller (University of Bayreuth)
Tags: Full Paper | Links:
@inproceedings{ZindulkaPerformance,
  title     = {Performance and Experience of Throwing in Virtual Reality},
  author    = {Tim Zindulka (University of Bayreuth) and Myroslav Bachynskyi (University of Bayreuth) and Jörg Müller (University of Bayreuth)},
  doi       = {10.1145/3313831.3376639},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
PhysioSkin: Rapid Fabrication of Skin-Conformal Physiological Interfaces
Aditya Shekhar Nittala (Saarland University, Saarland Informatics Campus), Arshad Khan (Saarland University, Saarland Informatics Campus, INM-Leibniz Institute for New Materials, Saarbrücken), Klaus Kruttwig (INM-Leibniz Institute for New Materials, Saarbrücken), Tobias Kraus (INM-Leibniz Institute for New Materials, Saarbrücken), Jürgen Steimle (Saarland University)
Abstract | Tags: Full Paper | Links:
@inproceedings{NittalaPhysioSkin,
  title     = {PhysioSkin: Rapid Fabrication of Skin-Conformal Physiological Interfaces},
  author    = {Aditya Shekhar Nittala (Saarland University, Saarland Informatics Campus) and Arshad Khan (Saarland University, Saarland Informatics Campus, INM-Leibniz Institute for New Materials, Saarbrücken) and Klaus Kruttwig (INM-Leibniz Institute for New Materials, Saarbrücken) and Tobias Kraus (INM-Leibniz Institute for New Materials, Saarbrücken) and Jürgen Steimle (Saarland University)},
  url       = {https://www.youtube.com/watch?v=qC9kKDN8aW8, Video},
  doi       = {10.1145/3313831.3376366},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Advances in rapid prototyping platforms have made physiological sensing accessible to a wide audience. However, off-the-shelf electrodes commonly used for capturing biosignals are typically thick, non-conformal and do not support customization. We present PhysioSkin, a rapid, do-it-yourself prototyping method for fabricating custom multi-modal physiological sensors, using commercial materials and a commodity desktop inkjet printer. It realizes ultrathin skin-conformal patches (∼1 µm) and interactive textiles that capture sEMG, EDA and ECG signals. It further supports fabricating devices with custom levels of thickness and stretchability. We present detailed fabrication explorations on multiple substrate materials, functional inks and skin adhesive materials. Informed from the literature, we also provide design recommendations for each of the modalities. Evaluation results show that the sensor patches achieve a high signal-to-noise ratio. Example applications demonstrate the functionality and versatility of our approach for prototyping the next generation of physiological devices that intimately couple with the human body.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Platform for Studying Self-Repairing Auto-Corrections in Mobile Text Entry based on Brain Activity, Gaze, and Context
Felix Putze (University of Bremen), Tilman Ihrig (University of Bremen), Tanja Schultz (University of Bremen), Wolfgang Stuerzlinger (Simon Fraser University)
Abstract | Tags: Full Paper | Links:
@inproceedings{PutzePlatform,
  title     = {Platform for Studying Self-Repairing Auto-Corrections in Mobile Text Entry based on Brain Activity, Gaze, and Context},
  author    = {Felix Putze (University of Bremen) and Tilman Ihrig (University of Bremen) and Tanja Schultz (University of Bremen) and Wolfgang Stuerzlinger (Simon Fraser University)},
  doi       = {10.1145/3313831.3376815},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Auto-correction is a standard feature of mobile text entry. While the performance of state-of-the-art auto-correct methods is usually relatively high, any errors that occur are cumbersome to repair, interrupt the flow of text entry, and challenge the user's agency over the process. In this paper, we describe a system that aims to automatically identify and repair auto-correction errors. This system comprises a multi-modal classifier for detecting auto-correction errors from brain activity, eye gaze, and context information, as well as a strategy to repair such errors by replacing the erroneous correction or suggesting alternatives. We integrated both parts in a generic Android component and thus present a research platform for studying self-repairing end-to-end systems. To demonstrate its feasibility, we performed a user study to evaluate the classification performance and usability of our approach.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Podoportation: Foot-Based Locomotion in Virtual Reality
Julius von Willich (TU Darmstadt), Martin Schmitz (TU Darmstadt), Florian Müller (TU Darmstadt), Daniel Schmitt (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
Abstract | Tags: Full Paper | Links:
@inproceedings{WillichPodoportation,
  title     = {Podoportation: Foot-Based Locomotion in Virtual Reality},
  author    = {Julius von Willich (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Florian Müller (TU Darmstadt) and Daniel Schmitt (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
  url       = {https://youtu.be/HGP5MN_e-k0, Video
               https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
  doi       = {10.1145/3313831.3376626},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Virtual Reality (VR) allows for infinitely large environments. However, the physical traversable space is always limited by real-world boundaries. This discrepancy between physical and virtual dimensions renders traditional locomotion methods used in real world unfeasible. To alleviate these limitations, research proposed various artificial locomotion concepts such as teleportation, treadmills, and redirected walking. However, these concepts occupy the user's hands, require complex hardware or large physical spaces. In this paper, we contribute nine VR locomotion concepts for foot-based and hands-free locomotion, relying on the 3D position of the user's feet and the pressure applied to the sole as input modalities. We evaluate our concepts and compare them to state-of-the-art point & teleport technique in a controlled experiment with 20 participants. The results confirm the viability of our approaches for hands-free and engaging locomotion. Further, based on the findings, we contribute a wireless hardware prototype implementation.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

PolySense: Augmenting Textiles with Electrical Functionality using In-Situ Polymerization
Cedric Honnet (MIT Media Lab), Hannah Perner-Wilson (Kobakant), Marc Teyssier (Télécom Paris), Bruno Fruchard (Saarland University, SIC), Jürgen Steimle (Saarland University), Ana C. Baptista (CENIMAT/I3N), Paul Strohmeier (Saarland University)
Abstract | Tags: Full Paper | Links:
@inproceedings{HonnetPolySense,
title = {PolySense: Augmenting Textiles with Electrical Functionality using In-Situ Polymerization},
author = {Cedric Honnet (MIT Media Lab) and Hannah Perner-Wilson (Kobakant) and Marc Teyssier (Télécom Paris) and Bruno Fruchard (Saarland University, SIC) and Jürgen Steimle (Saarland University) and Ana C. Baptista (CENIMAT/I3N) and Paul Strohmeier (Saarland University)},
doi = {10.1145/3313831.3376841},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present a method for enabling arbitrary textiles to sense pressure and deformation: In-situ polymerization supports integration of piezoresistive properties at the material level, preserving a textile's haptic and mechanical characteristics. We demonstrate how to enhance a wide set of fabrics and yarns using only readily available tools. To further support customisation by the designer, we present methods for patterning, as needed to create circuits and sensors, and demonstrate how to combine areas of different conductance in one material. Technical evaluation results demonstrate the performance of sensors created using our method is comparable to off-the-shelf piezoresistive textiles. As application examples, we demonstrate rapid manufacturing of on-body interfaces, tie-dyed motion-capture clothing, and zippers that act as potentiometers.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

Power Play: How the Need to Empower or Overpower Other Players Predicts Preferences in League of Legends
Susanne Poeller (University of Trier), Nicola Baumann (University of Trier), Regan Mandryk (University of Saskatchewan)
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{PoellerPower,
  title     = {Power Play: How the Need to Empower or Overpower Other Players Predicts Preferences in League of Legends},
  author    = {Susanne Poeller (University of Trier) and Nicola Baumann (University of Trier) and Regan Mandryk (University of Saskatchewan)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376193},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Predicting Mid-Air Interaction Movements and Fatigue Using Deep Reinforcement Learning
Noshaba Cheema (Max-Planck Institute for Informatics / German Research Center for Artificial Intelligence (DFKI)), Laura Frey-Law (University of Iowa), Kourosh Naderi (Aalto University), Jaakko Lehtinen (Aalto University / NVIDIA Research), Philipp Slusallek (Saarland University / German Research Center for Artificial Intelligence (DFKI)), Perttu Hämäläinen (Aalto University)
Abstract | Tags: Full Paper | Links:
@inproceedings{CheemaPredicting,
  title     = {Predicting Mid-Air Interaction Movements and Fatigue Using Deep Reinforcement Learning},
  author    = {Noshaba Cheema (Max-Planck Institute for Informatics / German Research Center for Artificial Intelligence (DFKI)) and Laura Frey-Law (University of Iowa) and Kourosh Naderi (Aalto University) and Jaakko Lehtinen (Aalto University / NVIDIA Research) and Philipp Slusallek (Saarland University / German Research Center for Artificial Intelligence (DFKI)) and Perttu Hämäläinen (Aalto University)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376701},
  abstract  = {A common problem of mid-air interaction is excessive arm fatigue, known as the "Gorilla arm" effect. To predict and prevent such problems at a low cost, we investigate user testing of mid-air interaction without real users, utilizing biomechanically simulated AI agents trained using deep Reinforcement Learning (RL). We implement this in a pointing task and four experimental conditions, demonstrating that the simulated fatigue data matches human fatigue data. We also compare two effort models: 1) instantaneous joint torques commonly used in computer animation and robotics, and 2) the recent Three Compartment Controller (3CC-r) model from biomechanical literature. 3CC-r yields movements that are both more efficient and relaxed, whereas with instantaneous joint torques, the RL agent can easily generate movements that are quickly tiring or only reach the targets slowly and inaccurately. Our work demonstrates that deep RL combined with the 3CC-r provides a viable tool for predicting both interaction movements and user experience in silico, without users.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Quantification of Users' Visual Attention During Everyday Mobile Device Interactions
Mihai Bâce (ETH Zürich), Sander Staal (ETH Zürich), Andreas Bulling (University of Stuttgart)
Abstract | Tags: Full Paper | Links:
@inproceedings{BaceQuantification,
  title     = {Quantification of Users' Visual Attention During Everyday Mobile Device Interactions},
  author    = {Mihai Bâce (ETH Zürich) and Sander Staal (ETH Zürich) and Andreas Bulling (University of Stuttgart)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376449},
  abstract  = {We present the first real-world dataset and quantitative evaluation of visual attention of mobile device users in-situ, i.e. while using their devices during everyday routine. Understanding user attention is a core research challenge in mobile HCI but previous approaches relied on usage logs or self-reports that are only proxies and consequently do neither reflect attention completely nor accurately. Our evaluations are based on Everyday Mobile Visual Attention (EMVA) a new 32-participant dataset containing around 472 hours of video snippets recorded over more than two weeks in real life using the front-facing camera as well as associated usage logs, interaction events, and sensor data. Using an eye contact detection method, we are first to quantify the highly dynamic nature of everyday visual attention across users, mobile applications, and usage contexts. We discuss key insights from our analyses that highlight the potential and inform the design of future mobile attentive user interfaces.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes
Konstantin Klamka (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden), Jürgen Steimle (Saarland University)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KlamkaRapid,
  title       = {Rapid Iron-On User Interfaces: Hands-on Fabrication of Interactive Textile Prototypes},
  author      = {Konstantin Klamka (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden) and Jürgen Steimle (Saarland University)},
  url         = {https://youtu.be/FyPcMLBXIm0, Video
https://www.twitter.com/imldresden, Twitter},
  booktitle   = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher   = {ACM},
  institution = {TU Dresden},
  year        = {2020},
  date        = {2020-04-26},
  doi         = {10.1145/3313831.3376220},
  abstract    = {Rapid prototyping of interactive textiles is still challenging, since manual skills, several processing steps, and expert knowledge are involved. We present Rapid Iron-On User Interfaces, a novel fabrication approach for empowering designers and makers to enhance fabrics with interactive functionalities. It builds on heat-activated adhesive materials consisting of smart textiles and printed electronics, which can be flexibly ironed onto the fabric to create custom interface functionality. To support rapid fabrication in a sketching-like fashion, we developed a handheld dispenser tool for directly applying continuous functional tapes of desired length as well as discrete patches. We introduce versatile compositions techniques that allow for creating complex circuits, utilizing commodity textile accessories and sketching custom-shaped I/O modules. We further contribute a comprehensive library of components for input, output, wiring and computing. Three example applications, results from technical experiments and expert reviews demonstrate the functionality, versatility and potential of this approach.},
  keywords    = {Full Paper, Honorable Mention},
  pubstate    = {published},
  tppubtype   = {inproceedings}
}

Recognizing Affiliation: Using Behavioural Traces to Predict the Quality of Social Interactions in Online Games
Julian Frommel (Ulm University / University of Saskatchewan), Valentin Sagl (University of Saskatchewan), Ansgar E. Depping (University of Saskatchewan), Colby Johanson (University of Saskatchewan), Matthew K. Miller (University of Saskatchewan), Regan L. Mandryk (University of Saskatchewan)
Abstract | Tags: Full Paper | Links:
@inproceedings{FrommelRecognizing,
title = {Recognizing Affiliation: Using Behavioural Traces to Predict the Quality of Social Interactions in Online Games},
author = {Julian Frommel (Ulm University / University of Saskatchewan) and Valentin Sagl (University of Saskatchewan) and Ansgar E. Depping (University of Saskatchewan) and Colby Johanson (University of Saskatchewan) and Matthew K. Miller (University of Saskatchewan) and Regan L. Mandryk (University of Saskatchewan)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376446},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Online social interactions in multiplayer games can be supportive and positive or toxic and harmful; however, few methods can easily assess interpersonal interaction quality in games. We use behavioural traces to predict affiliation between dyadic strangers, facilitated through their social interactions in an online gaming setting. We collected audio, video, in-game, and self-report data from 23 dyads, extracted 75 features, trained Random Forest and Support Vector Machine models, and evaluated their performance predicting binary (high/low) as well as continuous affiliation toward a partner. The models can predict both binary and continuous affiliation with up to 79.1% accuracy (F1) and 20.1% explained variance (R2) on unseen data, with features based on verbal communication demonstrating the highest potential. Our findings can inform the design of multiplayer games and game communities, and guide the development of systems for matchmaking and mitigating toxic behaviour in online games.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

Robustness of Eye Movement Biometrics Against Varying Stimuli and Varying Trajectory Length
Christoph Schröder (University of Bremen), Sahar Mahdie Klim Al Zaidawi (University of Bremen), Martin H.U. Prinzler (University of Bremen), Sebastian Maneth (University of Bremen), Gabriel Zachmann (University of Bremen)
Abstract | Tags: Full Paper | Links:
@inproceedings{SchroederRobustness,
  title     = {Robustness of Eye Movement Biometrics Against Varying Stimuli and Varying Trajectory Length},
  author    = {Christoph Schröder (University of Bremen) and Sahar Mahdie Klim Al Zaidawi (University of Bremen) and Martin H.U. Prinzler (University of Bremen) and Sebastian Maneth (University of Bremen) and Gabriel Zachmann (University of Bremen)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376534},
  abstract  = {Recent results suggest that biometric identification based on human’s eye movement characteristics can be used for authentication. In this paper, we present three new methods and benchmark them against the state-of-the-art. The best of our new methods improves the state-of-the-art performance by 5.9 percentage points. Furthermore, we investigate some of the factors that affect the robustness of the recognition rate of different classifiers on gaze trajectories, such as the type of stimulus and the tracking trajectory length. We find that the state-of-the-art method only works well when using the same stimulus for testing that was used for training. By contrast, our novel method more than doubles the identification accuracy for these transfer cases. Furthermore, we find that with only 90 seconds of eye tracking data, 86.7 % accuracy can be achieved.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sara, the Lecturer: Improving Learning in Online Education with a Scaffolding-Based Conversational Agent
Rainer Winkler (University of St. Gallen), Sebastian Hobert (University of Goettingen), Antti Salovaara (Aalto University), Matthias Söllner (University of Kassel), Jan Marco Leimeister (University of St. Gallen)
Tags: Full Paper | Links:
@inproceedings{WinklerSara,
  title     = {Sara, the Lecturer: Improving Learning in Online Education with a Scaffolding-Based Conversational Agent},
  author    = {Rainer Winkler (University of St. Gallen) and Sebastian Hobert (University of Goettingen) and Antti Salovaara (Aalto University) and Matthias Söllner (University of Kassel) and Jan Marco Leimeister (University of St. Gallen)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376781},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Social Acceptability in HCI: A Survey of Methods, Measures, and Design Strategies
Marion Koelle (University of Oldenburg / Saarland University, Saarland Informatics Campus), Swamy Ananthanarayan (University of Oldenburg), Susanne Boll (University of Oldenburg)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KoelleSocial,
  title     = {Social Acceptability in HCI: A Survey of Methods, Measures, and Design Strategies},
  author    = {Marion Koelle (University of Oldenburg / Saarland University, Saarland Informatics Campus) and Swamy Ananthanarayan (University of Oldenburg) and Susanne Boll (University of Oldenburg)},
  url       = {https://www.twitter.com/hcioldenburg, Twitter},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376162},
  abstract  = {With the increasing ubiquity of personal devices, social acceptability of human-machine interactions has gained relevance and growing interest from the HCI community. Yet, there are no best practices or established methods for evaluating social acceptability. Design strategies for increasing social acceptability have been described and employed, but so far not been holistically appraised and evaluated. We offer a systematic literature analysis (N=69) of social acceptability in HCI and contribute a better understanding of current research practices, namely, methods employed, measures and design strategies. Our review identified an unbalanced distribution of study approaches, shortcomings in employed measures, and a lack of interweaving between empirical and artifact-creating approaches. The latter causes a discrepancy between design recommendations based on user research, and design strategies employed in artifact creation. Our survey lays the groundwork for a more nuanced evaluation of social acceptability, the development of best practices, and a future research agenda.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Social Technology Appropriation in Dementia: Investigating the Role of Caregivers in engaging People with Dementia with a Videogame-based Training System
David Unbehaun (Uni Siegen), Konstantin Aal (Uni Siegen), Daryoush Daniel Vaziri (Hochschule Bonn-Rhein-Sieg), Peter David Tolmie (Uni Siegen), Rainer Wieching (Uni Siegen), David Randall (Uni Siegen), Volker Wulf (Uni Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{UnbehaunSocial,
title = {Social Technology Appropriation in Dementia: Investigating the Role of Caregivers in engaging People with Dementia with a Videogame-based Training System},
author = {David Unbehaun (Uni Siegen) and Konstantin Aal (Uni Siegen) and Daryoush Daniel Vaziri (Hochschule Bonn-Rhein-Sieg) and Peter David Tolmie (Uni Siegen) and Rainer Wieching (Uni Siegen) and David Randall (Uni Siegen) and Volker Wulf (Uni Siegen)},
doi = {10.1145/3313831.3376648},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {We present here the outcomes of a 4-month evaluation of the individual, social and institutional impact of a videogame-based training system. The everyday behavior and interactions of 52 PwD and 25 caregivers was studied qualitatively, focusing on the role played by caregivers in integrating the system into daily routines.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
TAGSwipe: Touch Assisted Gaze Swipe for Text Entry
Chandan Kumar (University of Koblenz), Ramin Hedeshy (University of Koblenz), Scott MacKenzie (York University), Steffen Staab (University of Stuttgart)
Abstract | Tags: Full Paper | Links:
@inproceedings{KumarTagswipe,
title = {TAGSwipe: Touch Assisted Gaze Swipe for Text Entry},
author = {Chandan Kumar (University of Koblenz) and Ramin Hedeshy (University of Koblenz) and Scott MacKenzie (York University) and Steffen Staab (University of Stuttgart)},
url = {https://www.twitter.com/AnalyticComp, Twitter},
doi = {10.1145/3313831.3376317},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {The conventional dwell-based methods for text entry by gaze are typically slow and uncomfortable. A swipe-based method that maps gaze path into words offers an alternative. However, it requires the user to explicitly indicate the beginning and ending of a word, which is typically achieved by tedious gaze-only selection. This paper introduces TAGSwipe, a bi-modal method that combines the simplicity of touch with the speed of gaze for swiping through a word. The result is an efficient and comfortable dwell-free text entry method. In the lab study TAGSwipe achieved an average text entry rate of 15.46 wpm and significantly outperformed conventional swipe-based and dwell-based methods in efficacy and user satisfaction.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Telewalk: Towards Free and Endless Walking in Room-Scale Virtual Reality
Michael Rietzler (Ulm University), Martin Deubzer (Ulm University), Thomas Dreja (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{RietzlerTelewalk,
title = {Telewalk: Towards Free and Endless Walking in Room-Scale Virtual Reality},
author = {Michael Rietzler (Ulm University) and Martin Deubzer (Ulm University) and Thomas Dreja (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376821},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Natural navigation in VR is challenging due to spatial limitations. While Teleportation enables navigation within very small physical spaces and without causing motion sickness symptoms, it may reduce the feeling of presence and spatial awareness. Redirected walking (RDW), in contrast, allows users to naturally walk while staying inside a finite, but still very large, physical space. We present Telewalk, a novel locomotion approach that combines curvature and translation gains known from RDW research in a perceivable way. This combination enables Telewalk to be applied even within a physical space of 3m x 3m. Utilizing the head rotation as input device enables directional changes without any physical turns to keep the user always on an optimal circular path inside the real world while freely walking inside the virtual one. In a user study we found that even though motion sickness susceptible participants reported respective symptoms, Telewalk did result in stronger feelings of presence and immersion and was seen as more natural than Teleportation.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

The Low/High Index of Pupillary Activity
Andrew Duchowski (Clemson University), Krzysztof Krejtz (SWPS University of Social Sciences and Humanities), Nina Gehrer (University of Tübingen), Tanya Bafna (Technical University of Denmark), Per Bækgaard (Technical University of Denmark)
Tags: Full Paper | Links:
@inproceedings{DuchowskiLowHigh,
title = {The Low/High Index of Pupillary Activity},
author = {Andrew Duchowski (Clemson University) and Krzysztof Krejtz (SWPS University of Social Sciences {and} Humanities) and Nina Gehrer (University of Tübingen) and Tanya Bafna (Technical University of Denmark) and Per Bækgaard (Technical University of Denmark)},
doi = {10.1145/3313831.3376394},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions
Christina Katsini (Human Opsis), Yasmeen Abdrabou (Bundeswehr University Munich), George E. Raptis (Human Opsis), Mohamed Khamis (University of Glasgow), Florian Alt (Bundeswehr University Munich)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{KatsiniTheRole,
  title     = {The Role of Eye Gaze in Security and Privacy Applications: Survey and Future HCI Research Directions},
  author    = {Christina Katsini (Human Opsis) and Yasmeen Abdrabou (Bundeswehr University Munich) and George E. Raptis (Human Opsis) and Mohamed Khamis (University of Glasgow) and Florian Alt (Bundeswehr University Munich)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376840},
  abstract  = {For the past 20 years, researchers have investigated the use of eye tracking in security applications. We present a holistic view on gaze-based security applications. In particular, we canvassed the literature and classify the utility of gaze in security applications into a) authentication, b) privacy protection, and c) gaze monitoring during security critical tasks. This allows us to chart several research directions, most importantly 1) conducting field studies of implicit and explicit gaze-based authentication due to recent advances in eye tracking, 2) research on gaze-based privacy protection and gaze monitoring in security critical tasks which are under-investigated yet very promising areas, and 3) understanding the privacy implications of pervasive eye tracking. We discuss the most promising opportunities and most pressing challenges of eye tracking for security that will shape research in gaze-based security applications for the next decade.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
ThermalWear: Exploring Wearable On-chest Thermal Displays to Augment Voice Messages with Affect
Abdallah El Ali (Centrum Wiskunde Informatica), Xingyu Yang (Delft University of Technology), Swamy Ananthanarayan (University of Oldenburg), Thomas Röggla (Centrum Wiskunde Informatica), Jack Jansen (Centrum Wiskunde Informatica), Jess Hartcher-O’Brien (Delft University of Technology), Kaspar Jansen (Delft University of Technology), Pablo Cesar (Centrum Wiskunde Informatica / Delft University of Technology)
Abstract | Tags: Full Paper | Links:
@inproceedings{AliThermal,
  title     = {ThermalWear: Exploring Wearable On-chest Thermal Displays to Augment Voice Messages with Affect},
  author    = {Abdallah El Ali (Centrum Wiskunde Informatica) and Xingyu Yang (Delft University of Technology) and Swamy Ananthanarayan (University of Oldenburg) and Thomas Röggla (Centrum Wiskunde Informatica) and Jack Jansen (Centrum Wiskunde Informatica) and Jess Hartcher-O’Brien (Delft University of Technology) and Kaspar Jansen (Delft University of Technology) and Pablo Cesar (Centrum Wiskunde Informatica / Delft University of Technology)},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376682},
  abstract  = {Voice is a rich modality for conveying emotions, however emotional prosody production can be situationally or medically impaired. Since thermal displays have been shown to evoke emotions, we explore how thermal stimulation can augment perception of neutrally-spoken voice messages with affect. We designed ThermalWear, a wearable on-chest thermal display, then tested in a controlled study (N=12) the effects of fabric, thermal intensity, and direction of change. Thereafter, we synthesized 12 neutrally-spoken voice messages, validated (N=7) them, then tested (N=12) if thermal stimuli can augment their perception with affect. We found warm and cool stimuli (a) can be perceived on the chest, and quickly without fabric (4.7-5s) (b) do not incur discomfort (c) generally increase arousal of voice messages and (d) increase / decrease message valence, respectively. We discuss how thermal displays can augment voice perception, which can enhance voice assistants and support individuals with emotional prosody impairments.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Therminator: Understanding the Interdependency of Visual and On-Body Thermal Feedback in Virtual Reality
Sebastian Günther (TU Darmstadt), Florian Müller (TU Darmstadt), Dominik Schön (TU Darmstadt), Omar Elmoghazy (GUC), Martin Schmitz (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
Abstract | Tags: Full Paper | Links:
@inproceedings{GuentherTherminator,
  title     = {Therminator: Understanding the Interdependency of Visual and On-Body Thermal Feedback in Virtual Reality},
  author    = {Sebastian Günther (TU Darmstadt) and Florian Müller (TU Darmstadt) and Dominik Schön (TU Darmstadt) and Omar Elmoghazy (GUC) and Martin Schmitz (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
  url       = {https://www.youtube.com/watch?v=w9FnG1eoWD8&feature=youtu.be, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376195},
  abstract  = {Recent advances have made Virtual Reality (VR) more realistic than ever before. This improved realism is attributed to today's ability to increasingly appeal to human sensations, such as visual, auditory or tactile. While research also examines temperature sensation as an important aspect, the interdependency of visual and thermal perception in VR is still underexplored. In this paper, we propose Therminator, a thermal display concept that provides warm and cold on-body feedback in VR through heat conduction of flowing liquids with different temperatures. Further, we systematically evaluate the interdependency of different visual and thermal stimuli on the temperature perception of arm and abdomen with 25 participants. As part of the results, we found varying temperature perception depending on the stimuli, as well as increasing involvement of users during conditions with matching stimuli.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Towards Inclusive External Communication of Autonomous Vehicles for Pedestrians with Vision Impairments
Mark Colley (Ulm University), Marcel Walch (Ulm University), Jan Gugenheimer (Ulm University), Ali Askari (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper, Honorable Mention | Links:
@inproceedings{ColleyTowards,
  title     = {Towards Inclusive External Communication of Autonomous Vehicles for Pedestrians with Vision Impairments},
  author    = {Mark Colley (Ulm University) and Marcel Walch (Ulm University) and Jan Gugenheimer (Ulm University) and Ali Askari (Ulm University) and Enrico Rukzio (Ulm University)},
  url       = {https://youtu.be/1L7zTJ86PE8, Video
https://www.twitter.com/mi_uulm, Twitter},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376472},
  abstract  = {People with vision impairments (VIP) are among the most vulnerable road users in traffic. Autonomous vehicles are believed to reduce accidents but still demand some form of external communication signaling relevant information to pedestrians. Recent research on the design of vehicle-pedestrian communication (VPC) focuses strongly on concepts for a non-disabled population. Our work presents an inclusive user-centered design for VPC, beneficial for both vision impaired and seeing pedestrians. We conducted a workshop with VIP (N=6), discussing current issues in road traffic and comparing communication concepts proposed by literature. A thematic analysis unveiled two important themes: number of communicating vehicles and content (affecting duration). Subsequently, we investigated these in a second user study in virtual reality (N=33, 8 VIP) comparing the VPC between groups of abilities. We found that trust and understanding is enhanced and cognitive load reduced when all relevant vehicles communicate; high content messages also reduce cognitive load.},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

TRACTUS: Understanding and Supporting Source Code Experimentation in Hypothesis-Driven Data Science
Krishna Subramanian (RWTH), Johannes Maas (RWTH), Jan Borchers (RWTH)
Abstract | Tags: Full Paper | Links:
@inproceedings{SubramanianTractus,
  title     = {TRACTUS: Understanding and Supporting Source Code Experimentation in Hypothesis-Driven Data Science},
  author    = {Krishna Subramanian (RWTH) and Johannes Maas (RWTH) and Jan Borchers (RWTH)},
  url       = {https://youtu.be/iP0aW731MUQ, Video},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  year      = {2020},
  date      = {2020-04-26},
  doi       = {10.1145/3313831.3376764},
  abstract  = {Data scientists experiment heavily with their code, compromising code quality to obtain insights faster. We observed ten data scientists perform hypothesis-driven data science tasks, and analyzed their coding, commenting, and analysis practice. We found that they have difficulty keeping track of their code experiments. When revisiting exploratory code to write production code later, they struggle to retrace their steps and capture the decisions made and insights obtained, and have to rerun code frequently. To address these issues, we designed TRACTUS, a system extending the popular RStudio IDE, that detects, tracks, and visualizes code experiments in hypothesis-driven data science tasks. TRACTUS helps recall decisions and insights by grouping code experiments into hypotheses, and structuring information like code execution output and documentation. Our user studies show how TRACTUS improves data scientists' workflows, and suggest additional opportunities for improvement. TRACTUS is available as an open source RStudio IDE addin at http://hci.rwth-aachen.de/tractus.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Transparency of CHI Research Artifacts: Results of a Self-Reported Survey
Chat Wacharamanotham (University of Zürich), Lukas Eisenring (University of Zürich), Steve Haroz (Université Paris-Saclay, Inria), Florian Echtler (Bauhaus-Universität Weimar)
Tags: Best Paper, Full Paper | Links:
@inproceedings{WacharamanothamTransparency,
title = {Transparency of {CHI} Research Artifacts: Results of a Self-Reported Survey},
author = {Chat Wacharamanotham (University of Zürich) and Lukas Eisenring (University of Zürich) and Steve Haroz (Université Paris-Saclay, Inria) and Florian Echtler (Bauhaus-Universität Weimar)},
doi = {10.1145/3313831.3376448},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
keywords = {Best Paper, Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
Trust versus Privacy: Using Connected Car Data in Peer-to-Peer Carsharing
Paul Bossauer (University of Siegen), Thomas Neifer (Hochschule Bonn-Rhein-Sieg Sankt Augustin), Gunnar Stevens (University of Siegen), Christina Pakusch (University of Siegen)
Abstract | Tags: Full Paper | Links:
@inproceedings{BossauerTrust,
  title     = {Trust versus Privacy: Using Connected Car Data in Peer-to-Peer Carsharing},
  author    = {Paul Bossauer (University of Siegen) and Thomas Neifer (Hochschule Bonn-Rhein-Sieg Sankt Augustin) and Gunnar Stevens (University of Siegen) and Christina Pakusch (University of Siegen)},
  doi       = {10.1145/3313831.3376555},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Trust is the lubricant of the sharing economy. This is true especially in peer-to-peer carsharing, in which one leaves a highly valuable good to a stranger in the hope of getting it back unscathed. Nowadays, ratings of other users are major mechanisms for establishing trust. To foster uptake of peer-to-peer carsharing, connected car technology opens new possibilities to support trust-building, e.g., by adding driving behavior statistics to users’ profiles. However, collecting such data intrudes into rentees' privacy. To explore the tension between the need for trust and privacy demands, we conducted three focus group and eight individual interviews. Our results show that connected car technologies can increase trust for car owners and rentees not only before but also during and after rentals. The design of such systems must allow a differentiation between information in terms of type, the context, and the negotiability of information disclosure.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Understanding the Heisenberg Effect of Spatial Interaction: A Selection Induced Error for Spatially Tracked Input Devices
Dennis Wolf (Ulm University), Jan Gugenheimer (Ulm University), Marco Combosch (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{WolfUnderstanding,
title = {Understanding the {Heisenberg} Effect of Spatial Interaction: A Selection Induced Error for Spatially Tracked Input Devices},
author = {Dennis Wolf (Ulm University) and Jan Gugenheimer (Ulm University) and Marco Combosch (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376876},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Virtual and augmented reality head-mounted displays (HMDs) are currently heavily relying on spatially tracked input devices (STID) for interaction. These STIDs are all prone to the phenomenon that a discrete input (e.g., button press) will disturb the position of the tracker, resulting in a different selection point during ray-cast interaction (Heisenberg Effect of Spatial Interaction). Besides the knowledge of its existence, there is currently a lack of a deeper understanding of its severity, structure and impact on throughput and angular error during a selection task. In this work, we present a formal evaluation of the Heisenberg effect and the impact of body posture, arm position and STID degrees of freedom on its severity. In a Fitt’s law inspired user study (N=16), we found that the Heisenberg effect is responsible for 30.45% of the overall errors occurring during a pointing task, but can be reduced by 25.4% using a correction function.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

Understanding what you feel: A Mobile Audio-Tactile System for Graphics Used at Schools with Students with Visual Impairment
Giuseppe Melfi (KIT-SZS), Karin Müller (KIT-SZS), Thorsten Schwarz (KIT-SZS), Gerhard Jaworek (KIT-SZS), Rainer Stiefelhagen (KIT-SZS)
Abstract | Tags: Full Paper | Links:
@inproceedings{MelfiUnderstanding,
  title     = {Understanding what you feel: A Mobile Audio-Tactile System for Graphics Used at Schools with Students with Visual Impairment},
  author    = {Giuseppe Melfi (KIT-SZS) and Karin Müller (KIT-SZS) and Thorsten Schwarz (KIT-SZS) and Gerhard Jaworek (KIT-SZS) and Rainer Stiefelhagen (KIT-SZS)},
  doi       = {10.1145/3313831.3376508},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {A lot of information is nowadays presented graphically. However, students with blindness do not have access to visual information. Providing an alternative text is not always the appropriate solution as exploring graphics to discover information independently is a fundamental part of the learning process. In this work, we introduce a mobile audio-tactile learning environment, which facilitates the incorporation of real educational material. We evaluate our system by comparing three methods of interaction with tactile graphics: A tactile graphic augmented by (1) a document with key index information in Braille, (2) a digital document with key index information and (3) the TPad system, an audio-tactile solution meeting the specific needs within the school context. Our study shows that the TPad system is suitable for educational environments. Moreover, compared to the other methods TPad is faster to explore tactile graphics and it suggests a promising effect on the memorization of information.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Vibrotactile Funneling Illusion and Localization Performance on the Head
Oliver Beren Kaul (Leibniz University Hannover), Michael Rohs (Leibniz University Hannover), Benjamin Simon (Leibniz University Hannover), Kerem Can Demir (Leibniz University Hannover), Kamillo Ferry (Leibniz University Hannover)
Abstract | Tags: Full Paper | Links:
@inproceedings{KaulVibrotactile,
  title     = {Vibrotactile Funneling Illusion and Localization Performance on the Head},
  author    = {Oliver Beren Kaul (Leibniz University Hannover) and Michael Rohs (Leibniz University Hannover) and Benjamin Simon (Leibniz University Hannover) and Kerem Can Demir (Leibniz University Hannover) and Kamillo Ferry (Leibniz University Hannover)},
  url       = {https://youtu.be/emySptGIP9Y, Video},
  doi       = {10.1145/3313831.3376335},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {The vibrotactile funneling illusion is the sensation of a single (non-existing) stimulus somewhere in-between the actual stimulus locations. Its occurrence depends upon body location, distance between the actuators, signal synchronization, and intensity. Related work has shown that the funneling illusion may occur on the forehead. We were able to reproduce these findings and explored five further regions to get a more complete picture of the occurrence of the funneling illusion on the head. The results of our study (24 participants) show that the actuator distance, for which the funneling illusion occurs, strongly depends upon the head region. Moreover, we evaluated the centralizing bias (smaller perceived than actual actuator distances) for different head regions, which also showed widely varying characteristics. We computed a detailed heat map of vibrotactile localization accuracies on the head. The results inform the design of future tactile head-mounted displays that aim to support the funneling illusion.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality
Ville Mäkelä (LMU Munich / Tampere University), Rivu Radiah (Bundeswehr University Munich), Saleh Alsherif (German University in Cairo), Mohamed Khamis (University of Glasgow), Chong Xiao (LMU Munich), Lisa Borchert (LMU Munich), Albrecht Schmidt (LMU Munich), Florian Alt (Bundeswehr University Munich)
Abstract | Tags: Full Paper | Links:
@inproceedings{VilleVirtual,
  title     = {Virtual Field Studies: Conducting Studies on Public Displays in Virtual Reality},
  author    = {Ville Mäkelä (LMU Munich / Tampere University) and Rivu Radiah (Bundeswehr University Munich) and Saleh Alsherif (German University in Cairo) and Mohamed Khamis (University of Glasgow) and Chong Xiao (LMU Munich) and Lisa Borchert (LMU Munich) and Albrecht Schmidt (LMU Munich) and Florian Alt (Bundeswehr University Munich)},
  url       = {https://www.twitter.com/mimuc, Twitter},
  doi       = {10.1145/3313831.3376796},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Field studies on public displays can be difficult, expensive, and time-consuming. We investigate the feasibility of using virtual reality (VR) as a test-bed to evaluate deployments of public displays. Specifically, we investigate whether results from virtual field studies, conducted in a virtual public space, would match the results from a corresponding real-world setting. We report on two empirical user studies where we compared audience behavior around a virtual public display in the virtual world to audience behavior around a real public display. We found that virtual field studies can be a powerful research tool, as in both studies we observed largely similar behavior between the settings. We discuss the opportunities, challenges, and limitations of using virtual reality to conduct field studies, and provide lessons learned from our work that can help researchers decide whether to employ VR in their research and what factors to account for if doing so.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Virtual Reality Without Vision: A Haptic and Auditory White Cane to Navigate Complex Virtual Worlds
Alexa Siu (Stanford University), Mike Sinclair (Microsoft Research), Robert Kovacs (Hasso Plattner Institute), Eyal Ofek (Microsoft Research), Christian Holz (Microsoft Research), Edward Cutrell (Microsoft Research)
Tags: Full Paper, Honorable Mention | Links:
@inproceedings{SiuVirtual,
  title     = {Virtual Reality Without Vision: A Haptic and Auditory White Cane to Navigate Complex Virtual Worlds},
  author    = {Alexa Siu (Stanford University) and Mike Sinclair (Microsoft Research) and Robert Kovacs (Hasso Plattner Institute) and Eyal Ofek (Microsoft Research) and Christian Holz (Microsoft Research) and Edward Cutrell (Microsoft Research)},
  doi       = {10.1145/3313831.3376353},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  keywords  = {Full Paper, Honorable Mention},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
VRSketchIn: Exploring the Design Space of Pen and Tablet Interaction for 3D Sketching in Virtual Reality
Tobias Drey (Ulm University), Jan Gugenheimer (Ulm University), Julian Karlbauer (Ulm University), Maximilian Milo (Ulm University), Enrico Rukzio (Ulm University)
Abstract | Tags: Full Paper | Links:
@inproceedings{DreyVRSketchIn,
title = {{VRSketchIn}: Exploring the Design Space of Pen and Tablet Interaction for {3D} Sketching in Virtual Reality},
author = {Tobias Drey (Ulm University) and Jan Gugenheimer (Ulm University) and Julian Karlbauer (Ulm University) and Maximilian Milo (Ulm University) and Enrico Rukzio (Ulm University)},
url = {https://youtu.be/99hIlAbfan4, Video
https://www.twitter.com/mi_uulm, Twitter},
doi = {10.1145/3313831.3376628},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
abstract = {Sketching in virtual reality (VR) enhances perception and understanding of 3D volumes, but is currently a challenging task, as spatial input devices (e.g., tracked controllers) do not provide any scaffolding or constraints for mid-air interaction. We present VRSketchIn, a VR sketching application using a 6DoF-tracked pen and a 6DoF-tracked tablet as input devices, combining unconstrained 3D mid-air with constrained 2D surface-based sketching. To explore what possibilities arise from this combination of 2D (pen on tablet) and 3D input (6DoF pen), we present a set of design dimensions and define the design space for 2D and 3D sketching interaction metaphors in VR. We categorize prior art inside our design space and implemented a subset of metaphors for pen and tablet sketching in our prototype. To gain a deeper understanding which specific sketching operations users perform with 2D and which with 3D metaphors, we present findings of usability walkthroughs with six participants.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}

Walk The Line: Leveraging Lateral Shifts of the Walking Path as an Input Modality for Head-Mounted Displays
Florian Müller (TU Darmstadt), Martin Schmitz (TU Darmstadt), Daniel Schmitt (TU Darmstadt), Sebastian Günther (TU Darmstadt), Markus Funk (TU Darmstadt), Max Mühlhäuser (TU Darmstadt)
Abstract | Tags: Full Paper | Links:
@inproceedings{MuellerWalk,
  title     = {Walk The Line: Leveraging Lateral Shifts of the Walking Path as an Input Modality for Head-Mounted Displays},
  author    = {Florian Müller (TU Darmstadt) and Martin Schmitz (TU Darmstadt) and Daniel Schmitt (TU Darmstadt) and Sebastian Günther (TU Darmstadt) and Markus Funk (TU Darmstadt) and Max Mühlhäuser (TU Darmstadt)},
  url       = {https://youtu.be/6-XrF6J9cTc, Video
https://twitter.com/search?q=%23teamdarmstadt&src=typed_query&f=live, Twitter},
  doi       = {10.1145/3313831.3376852},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {Recent technological advances have made head-mounted displays (HMDs) smaller and untethered, fostering the vision of ubiquitous interaction in a digitally augmented physical world. Consequently, a major part of the interaction with such devices will happen on the go, calling for interaction techniques that allow users to interact while walking. In this paper, we explore lateral shifts of the walking path as a hands-free input modality. The available input options are visualized as lanes on the ground parallel to the user's walking path. Users can select options by shifting the walking path sideways to the respective lane. We contribute the results of a controlled experiment with 18 participants, confirming the viability of our approach for fast, accurate, and joyful interactions. Further, based on the findings of the controlled experiment, we present three example applications.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}

Walking by Cycling: A Novel In-Place Locomotion User Interface for Seated Virtual Reality Experiences
Jann Philipp Freiwald (Uni Hamburg), Oscar Ariza (Uni Hamburg), Omar Janeh (Uni Hamburg), Frank Steinicke (Uni Hamburg)
Abstract | Tags: Full Paper | Links:
@inproceedings{FreiwaldWalking,
  title     = {Walking by Cycling: A Novel In-Place Locomotion User Interface for Seated Virtual Reality Experiences},
  author    = {Jann Philipp Freiwald (Uni Hamburg) and Oscar Ariza (Uni Hamburg) and Omar Janeh (Uni Hamburg) and Frank Steinicke (Uni Hamburg)},
  doi       = {10.1145/3313831.3376574},
  year      = {2020},
  date      = {2020-04-26},
  booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
  publisher = {ACM},
  abstract  = {We introduce VR Strider, a novel locomotion user interface (LUI) for seated virtual reality (VR) experiences, which maps cycling biomechanics of the user's legs to virtual walking movements. The core idea is to translate the motion of pedaling on a mini exercise bike to a corresponding walking animation of a virtual avatar while providing audio-based tactile feedback on virtual ground contacts. We conducted an experiment to evaluate the LUI and our novel anchor-turning rotation control method regarding task performance, spatial cognition, VR sickness, sense of presence, usability and comfort in a path-integration task. The results show that VR Strider has a significant positive effect on the participants' angular and distance estimation, sense of presence and feeling of comfort compared to other established locomotion techniques, such as teleportation and joystick-based navigation. A confirmatory study further indicates the necessity of synchronized avatar animations for virtual vehicles that rely on pedalling.},
  keywords  = {Full Paper},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Watch+Strap: Extending Smartwatches with Interactive StrapDisplays
Konstantin Klamka (Technische Universität Dresden), Tom Horak (Technische Universität Dresden), Raimund Dachselt (Technische Universität Dresden)
Abstract | Tags: Full Paper | Links:
@inproceedings{KlamkaWatchStrap,
title = {{Watch+Strap}: Extending Smartwatches with Interactive {StrapDisplays}},
author = {Konstantin Klamka (Technische Universität Dresden) and Tom Horak (Technische Universität Dresden) and Raimund Dachselt (Technische Universität Dresden)},
url = {https://youtu.be/Op8-gh5GSxI, Video
https://www.twitter.com/imldresden, Twitter},
doi = {10.1145/3313831.3376199},
year = {2020},
date = {2020-04-26},
booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems. CHI 2020},
publisher = {ACM},
institution = {TU Dresden},
abstract = {While smartwatches are widely adopted these days, their input and output space remains fairly limited by their screen size. We present StrapDisplays—interactive watchbands with embedded display and touch technologies—that enhance commodity watches and extend their input and output capabilities. After introducing the physical design space of these StrapDisplays, we explore how to combine a smartwatch and straps in a synergistic Watch+Strap system. Specifically, we propose multiple interface concepts that consider promising content distributions, interaction techniques, usage types, and display roles. For example, the straps can enrich watch apps, display visualizations, provide glanceable feedback, or help avoiding occlusion issues. Further, we provide a modular research platform incorporating three StrapDisplay prototypes and a flexible web-based software architecture, demonstrating the feasibility of our approach. Early brainstorming sessions with 15 participants informed our design process, while later interviews with six experts supported our concepts and provided valuable feedback for future developments.},
keywords = {Full Paper},
pubstate = {published},
tppubtype = {inproceedings}
}
