[{"volume":94,"author":[{"first_name":"Hendrik","orcid":"0000-0002-9613-5713","last_name":"Buschmeier","id":"76456","full_name":"Buschmeier, Hendrik"},{"first_name":"Heike M.","id":"27152","full_name":"Buhl, Heike M.","last_name":"Buhl"},{"first_name":"Friederike","full_name":"Kern, Friederike","last_name":"Kern"},{"first_name":"Angela","id":"57578","full_name":"Grimminger, Angela","last_name":"Grimminger"},{"first_name":"Helen","full_name":"Beierling, Helen","id":"50995","last_name":"Beierling"},{"first_name":"Josephine Beryl","full_name":"Fisher, Josephine Beryl","id":"56345","orcid":"0000-0002-9997-9241","last_name":"Fisher"},{"last_name":"Groß","orcid":"0000-0002-9593-7220","id":"93405","full_name":"Groß, André","first_name":"André"},{"last_name":"Horwath","full_name":"Horwath, Ilona","id":"68836","first_name":"Ilona"},{"full_name":"Klowait, Nils","id":"98454","orcid":"0000-0002-7347-099X","last_name":"Klowait","first_name":"Nils"},{"last_name":"Lazarov","orcid":"0009-0009-0892-9483","full_name":"Lazarov, Stefan Teodorov","id":"90345","first_name":"Stefan Teodorov"},{"first_name":"Michael","last_name":"Lenke","full_name":"Lenke, Michael"},{"first_name":"Vivien","last_name":"Lohmer","full_name":"Lohmer, Vivien"},{"first_name":"Katharina","orcid":"0000-0002-5676-8233","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina"},{"first_name":"Ingrid","id":"451","full_name":"Scharlau, Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489"},{"first_name":"Amit","full_name":"Singh, Amit","id":"91018","last_name":"Singh","orcid":"0000-0002-7789-1521"},{"full_name":"Terfloth, Lutz","id":"37320","last_name":"Terfloth","first_name":"Lutz"},{"last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa","id":"86589","first_name":"Anna-Lisa"},{"first_name":"Yu","full_name":"Wang, Yu","last_name":"Wang"},{"first_name":"Annedore","last_name":"Wilmes","full_name":"Wilmes, Annedore"},{"first_name":"Britta","full_name":"Wrede, 
Britta","last_name":"Wrede"}],"date_updated":"2025-12-05T15:32:25Z","oa":"1","doi":"10.1016/j.cogsys.2025.101419","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}],"has_accepted_license":"1","publication_status":"published","intvolume":"        94","citation":{"apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","mla":"Buschmeier, Hendrik, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","chicago":"Buschmeier, Hendrik, Heike M. 
Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>"},"department":[{"_id":"660"}],"user_id":"57578","_id":"61156","project":[{"_id":"111","name":"TRR 318; TP A01: Adaptives Erklären"},{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"_id":"114","name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"name":"TRR 318 - Subproject B3","_id":"122"},{"name":"TRR 318 - Subproject B5","_id":"123"},{"_id":"119","name":"TRR 318 - Project Area Ö"}],"file_date_updated":"2025-12-01T21:02:20Z","article_number":"101419","article_type":"original","type":"journal_article","status":"public","date_created":"2025-09-08T14:24:32Z","title":"Forms of Understanding for XAI-Explanations","quality_controlled":"1","year":"2025","language":[{"iso":"eng"}],"keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"ddc":["006"],"publication":"Cognitive Systems 
Research","file":[{"relation":"main_file","success":1,"content_type":"application/pdf","access_level":"closed","file_name":"Buschmeier-etal-2025-COGSYS.pdf","file_id":"62730","file_size":10114981,"date_created":"2025-12-01T21:02:20Z","creator":"hbuschme","date_updated":"2025-12-01T21:02:20Z"}],"abstract":[{"text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. 
Against the background of this systematization, special challenges of understanding in XAI are discussed.","lang":"eng"}]},{"citation":{"bibtex":"@inproceedings{Lazarov_Grimminger_2024, title={Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding}, volume={46}, booktitle={Proceedings of the Annual Meeting of the Cognitive Science Society}, author={Lazarov, Stefan Teodorov and Grimminger, Angela}, year={2024} }","mla":"Lazarov, Stefan Teodorov, and Angela Grimminger. “Variations in Explainers’ Gesture Deixis in Explanations Related to the Monitoring of Explainees’ Understanding.” <i>Proceedings of the Annual Meeting of the Cognitive Science Society</i>, vol. 46, 2024.","short":"S.T. Lazarov, A. Grimminger, in: Proceedings of the Annual Meeting of the Cognitive Science Society, 2024.","apa":"Lazarov, S. T., &#38; Grimminger, A. (2024). Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding. <i>Proceedings of the Annual Meeting of the Cognitive Science Society</i>, <i>46</i>.","ieee":"S. T. Lazarov and A. Grimminger, “Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding,” in <i>Proceedings of the Annual Meeting of the Cognitive Science Society</i>, Rotterdam, 2024, vol. 46.","chicago":"Lazarov, Stefan Teodorov, and Angela Grimminger. “Variations in Explainers’ Gesture Deixis in Explanations Related to the Monitoring of Explainees’ Understanding.” In <i>Proceedings of the Annual Meeting of the Cognitive Science Society</i>, Vol. 46, 2024.","ama":"Lazarov ST, Grimminger A. Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding. In: <i>Proceedings of the Annual Meeting of the Cognitive Science Society</i>. Vol 46. 
; 2024."},"intvolume":"        46","year":"2024","publication_status":"published","main_file_link":[{"open_access":"1","url":"https://escholarship.org/uc/item/7dz8n8tf"}],"conference":{"end_date":"2024-07-27","location":"Rotterdam","name":"Cognitive Science Society","start_date":"2024-07-24"},"title":"Variations in explainers’ gesture deixis in explanations related to the monitoring of explainees’ understanding","date_created":"2024-11-18T13:40:09Z","author":[{"id":"90345","full_name":"Lazarov, Stefan Teodorov","last_name":"Lazarov","orcid":"0009-0009-0892-9483","first_name":"Stefan Teodorov"},{"last_name":"Grimminger","id":"57578","full_name":"Grimminger, Angela","first_name":"Angela"}],"volume":46,"oa":"1","date_updated":"2024-11-18T13:40:39Z","status":"public","abstract":[{"lang":"eng","text":"In this study on the use of gesture deixis during explanations, a sample of 24 videorecorded dyadic interactions of a board game explanation was analyzed. The relation between the use of gesture deixis by different explainers and their interpretation of explainees' understanding was investigated. In addition, we describe explainers' intra-individual variations related to their interactions with three different explainees consecutively. 
While we did not find a relation between interpretations of explainees' complete understanding and a decrease in explainers' use of gesture deixis, we demonstrated that the overall use of gesture deixis is related to the process of interactional monitoring and the attendance of a different explainee."}],"type":"conference","publication":"Proceedings of the Annual Meeting of the Cognitive Science Society","language":[{"iso":"eng"}],"keyword":["explanation","gesture deixis","monitoring","understanding"],"user_id":"90345","department":[{"_id":"660"}],"project":[{"grant_number":"438445824","name":"TRR 318 - A02: TRR 318 - Verstehensprozess einer Erklärung beobachten und auswerten (Teilprojekt A02)","_id":"112"}],"_id":"57204"},{"citation":{"ieee":"J. Paletschek, “A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress,” presented at the 12th International Conference on  Affective Computing &#38; Intelligent Interaction, Glasgow, 2024, doi: <a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">10.1109/ACII63134.2024.00040</a>.","chicago":"Paletschek, Jonas. “A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress.” In <i>12th International Conference on  Affective Computing &#38; Intelligent Interaction</i>. IEEE, 2024. <a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">https://doi.org/10.1109/ACII63134.2024.00040</a>.","ama":"Paletschek J. A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress. In: <i>12th International Conference on  Affective Computing &#38; Intelligent Interaction</i>. IEEE; 2024. doi:<a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">10.1109/ACII63134.2024.00040</a>","mla":"Paletschek, Jonas. 
“A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress.” <i>12th International Conference on  Affective Computing &#38; Intelligent Interaction</i>, IEEE, 2024, doi:<a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">10.1109/ACII63134.2024.00040</a>.","short":"J. Paletschek, in: 12th International Conference on  Affective Computing &#38; Intelligent Interaction, IEEE, 2024.","bibtex":"@inproceedings{Paletschek_2024, title={A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress}, DOI={<a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">10.1109/ACII63134.2024.00040</a>}, booktitle={12th International Conference on  Affective Computing &#38; Intelligent Interaction}, publisher={IEEE}, author={Paletschek, Jonas}, year={2024} }","apa":"Paletschek, J. (2024). A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress. <i>12th International Conference on  Affective Computing &#38; Intelligent Interaction</i>. 12th International Conference on  Affective Computing &#38; Intelligent Interaction, Glasgow. 
<a href=\"https://doi.org/10.1109/ACII63134.2024.00040\">https://doi.org/10.1109/ACII63134.2024.00040</a>"},"publication_status":"published","has_accepted_license":"1","conference":{"name":"12th International Conference on  Affective Computing & Intelligent Interaction","start_date":"2024-09-15","end_date":"2024-09-18","location":"Glasgow"},"doi":"10.1109/ACII63134.2024.00040","author":[{"first_name":"Jonas","last_name":"Paletschek","full_name":"Paletschek, Jonas","id":"98941"}],"date_updated":"2025-09-16T07:57:53Z","status":"public","type":"conference","file_date_updated":"2025-09-15T11:18:01Z","user_id":"98941","department":[{"_id":"660"}],"project":[{"name":"TRR 318 - Teilprojekt A6 - Inklusive Ko-Konstruktion sozialer Signale des Verstehens","_id":"1200"}],"_id":"61273","year":"2024","title":"A Paradigm to Investigate Social Signals of Understanding and Their Susceptibility to Stress","date_created":"2025-09-15T11:24:56Z","publisher":"IEEE","file":[{"creator":"paletsch","date_created":"2025-09-15T11:18:01Z","date_updated":"2025-09-15T11:18:01Z","file_id":"61274","file_name":"ACII2024_Camera_Ready.pdf","access_level":"closed","file_size":8807478,"content_type":"application/pdf","relation":"main_file","success":1}],"abstract":[{"text":"In human-machine explanation interactions, such as tutoring systems or customer support chatbots, it is important for the machine explainer to infer the human user's understanding.  Nonverbal signals play an important role for expressing mental states like understanding and confusion in these interactions. However, an individual's expressions may vary depending on other factors. In cases where these factors are unknown, machine learning methods that infer understanding from nonverbal cues become unreliable. 
Stress for example has been shown to affect human expression, but it is not clear from the current research how stress affects the expression of understanding.\r\nTo address this gap, we design a paradigm that induces understanding and confusion through game rule explanations. During the explanations, self-perceived understanding and confusion are annotated by the participants. A stress condition is also introduced to enable the investigation of changes in the expression of social signals under stress.\r\nWe conducted a study to validate the stress induction and participants reported a statistically significant increase in stress during the stress condition compared to the neutral control condition. \r\nAdditionally, feedback from participants shows that the paradigm is effective in inducing understanding and confusion. \r\nThis paradigm paves the way for further studies investigating social signals of understanding to improve human-machine explanation interactions for varying contexts.","lang":"eng"}],"publication":"12th International Conference on  Affective Computing & Intelligent Interaction","language":[{"iso":"eng"}],"ddc":["150"],"keyword":["Understanding","Nonverbal Social Signals","Stress Induction","Explanation","Machine Learning Bias"]},{"type":"journal_article","status":"public","project":[{"_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","grant_number":"438445824"}],"_id":"48543","user_id":"38219","department":[{"_id":"749"}],"article_type":"original","funded_apc":"1","publication_status":"published","citation":{"chicago":"Groß, A., Amit Singh, Ngoc Chi Banh, B. Richter, Ingrid Scharlau, Katharina J. Rohlfing, and B. Wrede. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i> 10 (2023). 
<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>.","ieee":"A. Groß <i>et al.</i>, “Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue,” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi: <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","ama":"Groß A, Singh A, Banh NC, et al. Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>. 2023;10. doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>","mla":"Groß, A., et al. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","short":"A. Groß, A. Singh, N.C. Banh, B. Richter, I. Scharlau, K.J. Rohlfing, B. Wrede, Frontiers in Robotics and AI 10 (2023).","bibtex":"@article{Groß_Singh_Banh_Richter_Scharlau_Rohlfing_Wrede_2023, title={Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue}, volume={10}, DOI={<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>}, journal={Frontiers in Robotics and AI}, author={Groß, A. and Singh, Amit and Banh, Ngoc Chi and Richter, B. and Scharlau, Ingrid and Rohlfing, Katharina J. and Wrede, B.}, year={2023} }","apa":"Groß, A., Singh, A., Banh, N. C., Richter, B., Scharlau, I., Rohlfing, K. J., &#38; Wrede, B. (2023). Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>, <i>10</i>. 
<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>"},"intvolume":"        10","date_updated":"2024-06-26T08:01:50Z","oa":"1","author":[{"first_name":"A.","full_name":"Groß, A.","last_name":"Groß"},{"first_name":"Amit","last_name":"Singh","orcid":"0000-0002-7789-1521","id":"91018","full_name":"Singh, Amit"},{"first_name":"Ngoc Chi","last_name":"Banh","orcid":"0000-0002-5946-4542","id":"38219","full_name":"Banh, Ngoc Chi"},{"full_name":"Richter, B.","last_name":"Richter","first_name":"B."},{"first_name":"Ingrid","orcid":"0000-0003-2364-9489","last_name":"Scharlau","id":"451","full_name":"Scharlau, Ingrid"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing"},{"last_name":"Wrede","full_name":"Wrede, B.","first_name":"B."}],"volume":10,"main_file_link":[{"url":"https://www.frontiersin.org/articles/10.3389/frobt.2023.1236184/full","open_access":"1"}],"doi":"10.3389/frobt.2023.1236184","publication":"Frontiers in Robotics and AI","abstract":[{"text":"Explanation has been identified as an important capability for AI-based systems, but research on systematic strategies for achieving understanding in interaction with such systems is still sparse. Negation is a linguistic strategy that is often used in explanations. It creates a contrast space between the affirmed and the negated item that enriches explaining processes with additional contextual information. While negation in human speech has been shown to lead to higher processing costs and worse task performance in terms of recall or action execution when used in isolation, it can decrease processing costs when used in context. So far, it has not been considered as a guiding strategy for explanations in human-robot interaction. 
We conducted an empirical study to investigate the use of negation as a guiding strategy in explanatory human-robot dialogue, in which a virtual robot explains tasks and possible actions to a human explainee to solve them in terms of gestures on a touchscreen. Our results show that negation vs. affirmation 1) increases processing costs measured as reaction time and 2) increases several aspects of task performance. While there was no significant effect of negation on the number of initially correctly executed gestures, we found a significantly lower number of attempts—measured as breaks in the finger movement data before the correct gesture was carried out—when being instructed through a negation. We further found that the gestures significantly resembled the presented prototype gesture more following an instruction with a negation as opposed to an affirmation. Also, the participants rated the benefit of contrastive vs. affirmative explanations significantly higher. Repeating the instructions decreased the effects of negation, yielding similar processing costs and task performance measures for negation and affirmation after several iterations. We discuss our results with respect to possible effects of negation on linguistic processing of explanations and limitations of our study.","lang":"eng"}],"keyword":["HRI","XAI","negation","understanding","explaining","touch interaction","gesture"],"language":[{"iso":"eng"}],"quality_controlled":"1","year":"2023","date_created":"2023-10-30T09:29:16Z","title":"Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue"},{"keyword":["Attention","negation","contrastive  guidance","eye-movements","action understanding","event representation"],"language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"<p>The study investigates two different ways of guiding the addressee of an explanation - an explainee, through action demonstration: contrastive and non-contrastive. 
Their effect was tested on attention to specific action elements (goal) as well as on event memory. In an eye-tracking experiment, participants were shown different motion videos that were either contrastive or non-contrastive with respect to the segments of movement presentation. Given that everyday action demonstration is often multimodal, the stimuli were created with respect to their visual and verbal presentation. For visual presentation, a video combined two movements in a contrastive (e.g., Up-motion following a Down-motion) or non-contrastive way (e.g., two Up-motions following each other). For verbal presentation, each video was combined with a sequence of instruction descriptions in the form of negative (i.e., contrastive) or assertive (i.e., non-contrastive) guidance. It was found that a) attention to the event goal increased for this condition in the later time window, and b) participants’ recall of the event was facilitated when a visually contrastive motion was combined with a verbal contrast.</p>"}],"publication":"Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)","title":"Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall","publisher":"Cognitive Science Society","date_created":"2023-07-15T12:16:42Z","year":"2023","quality_controlled":"1","_id":"46067","project":[{"grant_number":"438445824","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115"}],"department":[{"_id":"749"},{"_id":"660"}],"user_id":"91018","status":"public","type":"conference","popular_science":"1","conference":{"location":"Sydney","name":"45th Annual Conference of the Cognitive Science 
Society"},"main_file_link":[{"url":"https://escholarship.org/uc/item/2w94t4cv","open_access":"1"}],"oa":"1","date_updated":"2023-09-27T13:51:42Z","author":[{"first_name":"Amit","last_name":"Singh","orcid":"0000-0002-7789-1521","full_name":"Singh, Amit","id":"91018"},{"first_name":"Katharina J.","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina J."}],"place":"Sydney, Australia","citation":{"bibtex":"@inproceedings{Singh_Rohlfing_2023, place={Sydney, Australia}, title={Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall}, booktitle={Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)}, publisher={Cognitive Science Society}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2023} }","mla":"Singh, Amit, and Katharina J. Rohlfing. “Contrastiveness in the Context of Action Demonstration: An Eye-Tracking Study on Its Effects on Action Perception and Action Recall.” <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>, Cognitive Science Society, 2023.","short":"A. Singh, K.J. Rohlfing, in: Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45), Cognitive Science Society, Sydney, Australia, 2023.","apa":"Singh, A., &#38; Rohlfing, K. J. (2023). Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall. <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. 45th Annual Conference of the Cognitive Science Society, Sydney.","ama":"Singh A, Rohlfing KJ. Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall. In: <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. Cognitive Science Society; 2023.","chicago":"Singh, Amit, and Katharina J. Rohlfing. 
“Contrastiveness in the Context of Action Demonstration: An Eye-Tracking Study on Its Effects on Action Perception and Action Recall.” In <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. Sydney, Australia: Cognitive Science Society, 2023.","ieee":"A. Singh and K. J. Rohlfing, “Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall,” presented at the 45th Annual Conference of the Cognitive Science Society, Sydney, 2023."},"publication_status":"published","related_material":{"record":[{"id":"46067","relation":"contains","status":"public"}]}},{"publication":"International Journal of Social Robotics","abstract":[{"text":"Alignment is a phenomenon observed in human conversation: Dialog partners' behavior converges in many respects. Such alignment has been proposed to be automatic and the basis for communicating successfully. Recent research on human-computer dialog promotes a mediated communicative design account of alignment according to which the extent of alignment is influenced by interlocutors' beliefs about each other. Our work aims at adding to these findings in two ways. (a) Our work investigates alignment of manual actions, instead of lexical choice. (b) Participants interact with the iCub humanoid robot, instead of an artificial computer dialog system. Our results confirm that alignment also takes place in the domain of actions. We were not able to replicate the results of the original study in general in this setting, but in accordance with its findings, participants with a high questionnaire score for emotional stability and participants who are familiar with robots align their actions more to a robot they believe to be basic than to one they believe to be advanced. 
Regarding alignment over the course of an interaction, the extent of alignment seems to remain constant, when participants believe the robot to be advanced, but it increases over time, when participants believe the robot to be a basic version.","lang":"eng"}],"keyword":["learning","Human-robot interaction","Alignment","Robot social","Action understanding"],"language":[{"iso":"eng"}],"issue":"2","year":"2015","publisher":"Springer-Verlag","date_created":"2020-06-24T13:01:06Z","title":"Alignment to the Actions of a Robot","type":"journal_article","status":"public","_id":"17189","department":[{"_id":"749"}],"user_id":"14931","publication_identifier":{"issn":["1875-4791"]},"page":"241-252","intvolume":"         7","citation":{"apa":"Vollmer, A.-L., Rohlfing, K., Wrede, B., &#38; Cangelosi, A. (2015). Alignment to the Actions of a Robot. <i>International Journal of Social Robotics</i>, <i>7</i>(2), 241–252. <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">https://doi.org/10.1007/s12369-014-0252-0</a>","mla":"Vollmer, Anna-Lisa, et al. “Alignment to the Actions of a Robot.” <i>International Journal of Social Robotics</i>, vol. 7, no. 2, Springer-Verlag, 2015, pp. 241–52, doi:<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>.","bibtex":"@article{Vollmer_Rohlfing_Wrede_Cangelosi_2015, title={Alignment to the Actions of a Robot}, volume={7}, DOI={<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>}, number={2}, journal={International Journal of Social Robotics}, publisher={Springer-Verlag}, author={Vollmer, Anna-Lisa and Rohlfing, Katharina and Wrede, Britta and Cangelosi, Angelo}, year={2015}, pages={241–252} }","short":"A.-L. Vollmer, K. Rohlfing, B. Wrede, A. Cangelosi, International Journal of Social Robotics 7 (2015) 241–252.","ama":"Vollmer A-L, Rohlfing K, Wrede B, Cangelosi A. Alignment to the Actions of a Robot. <i>International Journal of Social Robotics</i>. 2015;7(2):241-252. 
doi:<a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>","chicago":"Vollmer, Anna-Lisa, Katharina Rohlfing, Britta Wrede, and Angelo Cangelosi. “Alignment to the Actions of a Robot.” <i>International Journal of Social Robotics</i> 7, no. 2 (2015): 241–52. <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">https://doi.org/10.1007/s12369-014-0252-0</a>.","ieee":"A.-L. Vollmer, K. Rohlfing, B. Wrede, and A. Cangelosi, “Alignment to the Actions of a Robot,” <i>International Journal of Social Robotics</i>, vol. 7, no. 2, pp. 241–252, 2015, doi: <a href=\"https://doi.org/10.1007/s12369-014-0252-0\">10.1007/s12369-014-0252-0</a>."},"date_updated":"2023-02-01T16:07:40Z","volume":7,"author":[{"full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer","first_name":"Anna-Lisa"},{"id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing","first_name":"Katharina"},{"full_name":"Wrede, Britta","last_name":"Wrede","first_name":"Britta"},{"first_name":"Angelo","full_name":"Cangelosi, Angelo","last_name":"Cangelosi"}],"doi":"10.1007/s12369-014-0252-0"}]
