[{"user_id":"57578","department":[{"_id":"660"}],"project":[{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"_id":"118","name":"TRR 318: Project Area INF"}],"_id":"61323","type":"book_chapter","status":"public","editor":[{"full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing","first_name":"Katharina J."},{"full_name":"Främling, Kary","last_name":"Främling","first_name":"Kary"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana"},{"first_name":"Kirsten","full_name":"Thommes, Kirsten","last_name":"Thommes"},{"full_name":"Lim, Brian Y.","last_name":"Lim","first_name":"Brian Y."}],"author":[{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"},{"first_name":"Hendrik","last_name":"Buschmeier","full_name":"Buschmeier, Hendrik"},{"first_name":"Katharina Justine","last_name":"Rohlfing","orcid":"0000-0002-5676-8233","full_name":"Rohlfing, Katharina Justine","id":"50352"},{"first_name":"Meisam","full_name":"Booshehri, Meisam","last_name":"Booshehri"},{"first_name":"Angela","full_name":"Grimminger, Angela","id":"57578","last_name":"Grimminger"}],"oa":"1","date_updated":"2026-03-19T12:38:37Z","main_file_link":[{"open_access":"1"}],"doi":"10.1007/978-981-96-5290-7_12","related_material":{"link":[{"relation":"original","url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_12"}]},"publication_status":"epub_ahead","publication_identifier":{"eisbn":["978-981-96-5290-7"]},"citation":{"ama":"Wrede B, Buschmeier H, Rohlfing KJ, Booshehri M, Grimminger A. Incremental communication. In: Rohlfing KJ, Främling K, Alpsancar S, Thommes K, Lim BY, eds. <i>Social Explainable AI</i>. Springer; 2026:227-245. 
doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_12\">10.1007/978-981-96-5290-7_12</a>","chicago":"Wrede, Britta, Hendrik Buschmeier, Katharina Justine Rohlfing, Meisam Booshehri, and Angela Grimminger. “Incremental Communication.” In <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing, Kary Främling, Suzana Alpsancar, Kirsten Thommes, and Brian Y. Lim, 227–45. Springer, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_12\">https://doi.org/10.1007/978-981-96-5290-7_12</a>.","ieee":"B. Wrede, H. Buschmeier, K. J. Rohlfing, M. Booshehri, and A. Grimminger, “Incremental communication,” in <i>Social Explainable AI</i>, K. J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, and B. Y. Lim, Eds. Springer, 2026, pp. 227–245.","apa":"Wrede, B., Buschmeier, H., Rohlfing, K. J., Booshehri, M., &#38; Grimminger, A. (2026). Incremental communication. In K. J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, &#38; B. Y. Lim (Eds.), <i>Social Explainable AI</i> (pp. 227–245). Springer. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_12\">https://doi.org/10.1007/978-981-96-5290-7_12</a>","short":"B. Wrede, H. Buschmeier, K.J. Rohlfing, M. Booshehri, A. Grimminger, in: K.J. Rohlfing, K. Främling, S. Alpsancar, K. Thommes, B.Y. Lim (Eds.), Social Explainable AI, Springer, 2026, pp. 227–245.","mla":"Wrede, Britta, et al. “Incremental Communication.” <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing et al., Springer, 2026, pp. 227–45, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_12\">10.1007/978-981-96-5290-7_12</a>.","bibtex":"@inbook{Wrede_Buschmeier_Rohlfing_Booshehri_Grimminger_2026, title={Incremental communication}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_12\">10.1007/978-981-96-5290-7_12</a>}, booktitle={Social Explainable AI}, publisher={Springer}, author={Wrede, Britta and Buschmeier, Hendrik and Rohlfing, Katharina Justine and Booshehri, Meisam and Grimminger, Angela}, editor={Rohlfing, Katharina J. 
and Främling, Kary and Alpsancar, Suzana and Thommes, Kirsten and Lim, Brian Y.}, year={2026}, pages={227–245} }"},"page":"227-245","language":[{"iso":"eng"}],"publication":"Social Explainable AI","date_created":"2025-09-17T10:16:36Z","publisher":"Springer","title":"Incremental communication","quality_controlled":"1","year":"2026"},{"_id":"61112","project":[{"_id":"112","name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten"},{"_id":"111","name":"TRR 318; TP A01: Adaptives Erklären"},{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"},{"name":"TRR 318 - Subproject B5","_id":"123"}],"department":[{"_id":"660"}],"user_id":"57578","language":[{"iso":"eng"}],"publication":"Social Explainable AI","type":"book_chapter","editor":[{"first_name":"Katharina","full_name":"Rohlfing, Katharina","last_name":"Rohlfing"},{"full_name":"Främling, Kary","last_name":"Främling","first_name":"Kary"},{"first_name":"Kirsten","last_name":"Thommes","full_name":"Thommes, Kirsten"},{"first_name":"Suzana","last_name":"Alpsancar","full_name":"Alpsancar, Suzana"},{"last_name":"Lim","full_name":"Lim, Brian Y.","first_name":"Brian Y."}],"status":"public","date_updated":"2026-03-20T09:11:58Z","oa":"1","publisher":"Springer","date_created":"2025-09-02T14:33:16Z","author":[{"first_name":"Katharina J.","full_name":"Rohlfing, Katharina J.","id":"50352","last_name":"Rohlfing","orcid":"0000-0002-5676-8233"},{"first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer"},{"first_name":"Angela","last_name":"Grimminger","id":"57578","full_name":"Grimminger, Angela"}],"title":"Practices: How to establish an explaining 
practice","doi":"10.1007/978-981-96-5290-7_5","main_file_link":[{"open_access":"1"}],"quality_controlled":"1","publication_identifier":{"eisbn":["978-981-96-5290-7"]},"publication_status":"epub_ahead","related_material":{"link":[{"relation":"original","url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_5"}]},"year":"2026","citation":{"bibtex":"@inbook{Rohlfing_Vollmer_Grimminger_2026, title={Practices: How to establish an explaining practice}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_5\">10.1007/978-981-96-5290-7_5</a>}, booktitle={Social Explainable AI}, publisher={Springer}, author={Rohlfing, Katharina J. and Vollmer, Anna-Lisa and Grimminger, Angela}, editor={Rohlfing, Katharina and Främling, Kary and Thommes, Kirsten and Alpsancar, Suzana and Lim, Brian Y.}, year={2026} }","mla":"Rohlfing, Katharina J., et al. “Practices: How to Establish an Explaining Practice.” <i>Social Explainable AI</i>, edited by Katharina Rohlfing et al., Springer, 2026, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_5\">10.1007/978-981-96-5290-7_5</a>.","short":"K.J. Rohlfing, A.-L. Vollmer, A. Grimminger, in: K. Rohlfing, K. Främling, K. Thommes, S. Alpsancar, B.Y. Lim (Eds.), Social Explainable AI, Springer, 2026.","apa":"Rohlfing, K. J., Vollmer, A.-L., &#38; Grimminger, A. (2026). Practices: How to establish an explaining practice. In K. Rohlfing, K. Främling, K. Thommes, S. Alpsancar, &#38; B. Y. Lim (Eds.), <i>Social Explainable AI</i>. Springer. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_5\">https://doi.org/10.1007/978-981-96-5290-7_5</a>","chicago":"Rohlfing, Katharina J., Anna-Lisa Vollmer, and Angela Grimminger. “Practices: How to Establish an Explaining Practice.” In <i>Social Explainable AI</i>, edited by Katharina Rohlfing, Kary Främling, Kirsten Thommes, Suzana Alpsancar, and Brian Y. Lim. Springer, 2026. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_5\">https://doi.org/10.1007/978-981-96-5290-7_5</a>.","ieee":"K. J. 
Rohlfing, A.-L. Vollmer, and A. Grimminger, “Practices: How to establish an explaining practice,” in <i>Social Explainable AI</i>, K. Rohlfing, K. Främling, K. Thommes, S. Alpsancar, and B. Y. Lim, Eds. Springer, 2026.","ama":"Rohlfing KJ, Vollmer A-L, Grimminger A. Practices: How to establish an explaining practice. In: Rohlfing K, Främling K, Thommes K, Alpsancar S, Lim BY, eds. <i>Social Explainable AI</i>. Springer; 2026. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_5\">10.1007/978-981-96-5290-7_5</a>"}},{"oa":"1","publisher":"Springer","date_updated":"2026-03-23T18:25:34Z","author":[{"last_name":"Buhl","id":"27152","full_name":"Buhl, Heike M.","first_name":"Heike M."},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"},{"orcid":"0000-0002-9997-9241","last_name":"Fisher","full_name":"Fisher, Josephine Beryl","id":"56345","first_name":"Josephine Beryl"},{"first_name":"Marco","full_name":"Matarese, Marco","last_name":"Matarese"}],"date_created":"2026-03-23T07:55:56Z","title":"Adaptation","main_file_link":[{"url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_13","open_access":"1"}],"doi":"10.1007/978-981-96-5290-7_13","publication_identifier":{"eisbn":["978-981-96-5290-7"]},"related_material":{"link":[{"relation":"confirmation","url":"https://link.springer.com/chapter/10.1007/978-981-96-5290-7_13"}]},"year":"2026","citation":{"ieee":"H. M. Buhl, B. Wrede, J. B. Fisher, and M. Matarese, “Adaptation,” in <i>Social Explainable AI</i>, K. J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, and K. Thommes, Eds. Springer, 2026, pp. 247–267.","chicago":"Buhl, Heike M., Britta Wrede, Josephine Beryl Fisher, and Marco Matarese. “Adaptation.” In <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing, Kary Främling, Brian Lim, Suzana Alpsancar, and Kirsten Thommes, 247–67. Springer, 2026. 
<a href=\"https://doi.org/10.1007/978-981-96-5290-7_13\">https://doi.org/10.1007/978-981-96-5290-7_13</a>.","ama":"Buhl HM, Wrede B, Fisher JB, Matarese M. Adaptation. In: Rohlfing KJ, Främling K, Lim B, Alpsancar S, Thommes K, eds. <i>Social Explainable AI</i>. Springer; 2026:247-267. doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_13\">https://doi.org/10.1007/978-981-96-5290-7_13</a>","apa":"Buhl, H. M., Wrede, B., Fisher, J. B., &#38; Matarese, M. (2026). Adaptation. In K. J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, &#38; K. Thommes (Eds.), <i>Social Explainable AI</i> (pp. 247–267). Springer. <a href=\"https://doi.org/10.1007/978-981-96-5290-7_13\">https://doi.org/10.1007/978-981-96-5290-7_13</a>","bibtex":"@inbook{Buhl_Wrede_Fisher_Matarese_2026, title={Adaptation}, DOI={<a href=\"https://doi.org/10.1007/978-981-96-5290-7_13\">https://doi.org/10.1007/978-981-96-5290-7_13</a>}, booktitle={Social Explainable AI}, publisher={Springer}, author={Buhl, Heike M. and Wrede, Britta and Fisher, Josephine Beryl and Matarese, Marco}, editor={Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}, year={2026}, pages={247–267} }","short":"H.M. Buhl, B. Wrede, J.B. Fisher, M. Matarese, in: K.J. Rohlfing, K. Främling, B. Lim, S. Alpsancar, K. Thommes (Eds.), Social Explainable AI, Springer, 2026, pp. 247–267.","mla":"Buhl, Heike M., et al. “Adaptation.” <i>Social Explainable AI</i>, edited by Katharina J. Rohlfing et al., Springer, 2026, pp. 
247–67, doi:<a href=\"https://doi.org/10.1007/978-981-96-5290-7_13\">https://doi.org/10.1007/978-981-96-5290-7_13</a>."},"page":"247-267","project":[{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"_id":"114","name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"name":"TRR 318: Project Area INF","_id":"118"}],"_id":"65083","user_id":"90826","department":[{"_id":"427"},{"_id":"660"}],"language":[{"iso":"eng"}],"type":"book_chapter","publication":"Social Explainable AI","editor":[{"full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing","first_name":"Katharina J."},{"first_name":"Kary","last_name":"Främling","full_name":"Främling, Kary"},{"first_name":"Brian","full_name":"Lim, Brian","last_name":"Lim"},{"full_name":"Alpsancar, Suzana","last_name":"Alpsancar","first_name":"Suzana"},{"first_name":"Kirsten","full_name":"Thommes, Kirsten","last_name":"Thommes"}],"status":"public"},{"author":[{"first_name":"Amit","last_name":"Singh","orcid":"0000-0002-7789-1521","id":"91018","full_name":"Singh, Amit"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing","orcid":"0000-0002-5676-8233"}],"volume":49,"oa":"1","date_updated":"2025-08-18T08:31:04Z","main_file_link":[{"url":"https://pubmed.ncbi.nlm.nih.gov/40810767/","open_access":"1"}],"doi":"10.1111/cogs.70096","publication_status":"published","pmid":"1","citation":{"chicago":"Singh, Amit, and Katharina J. Rohlfing. “Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?” <i>Cognitive Science</i> 49, no. 8 (2025). <a href=\"https://doi.org/10.1111/cogs.70096\">https://doi.org/10.1111/cogs.70096</a>.","ieee":"A. Singh and K. J. 
Rohlfing, “Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?,” <i>Cognitive Science</i>, vol. 49, no. 8, Art. no. e70096, 2025, doi: <a href=\"https://doi.org/10.1111/cogs.70096\">10.1111/cogs.70096</a>.","ama":"Singh A, Rohlfing KJ. Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory? <i>Cognitive Science</i>. 2025;49(8). doi:<a href=\"https://doi.org/10.1111/cogs.70096\">10.1111/cogs.70096</a>","apa":"Singh, A., &#38; Rohlfing, K. J. (2025). Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory? <i>Cognitive Science</i>, <i>49</i>(8), Article e70096. <a href=\"https://doi.org/10.1111/cogs.70096\">https://doi.org/10.1111/cogs.70096</a>","bibtex":"@article{Singh_Rohlfing_2025, title={Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?}, volume={49}, DOI={<a href=\"https://doi.org/10.1111/cogs.70096\">10.1111/cogs.70096</a>}, number={8e70096}, journal={Cognitive Science}, publisher={Wiley}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2025} }","short":"A. Singh, K.J. Rohlfing, Cognitive Science 49 (2025).","mla":"Singh, Amit, and Katharina J. Rohlfing. “Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?” <i>Cognitive Science</i>, vol. 49, no. 
8, e70096, Wiley, 2025, doi:<a href=\"https://doi.org/10.1111/cogs.70096\">10.1111/cogs.70096</a>."},"intvolume":"        49","user_id":"91018","department":[{"_id":"749"},{"_id":"660"}],"project":[{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"}],"_id":"60935","article_type":"original","article_number":"e70096","type":"journal_article","status":"public","date_created":"2025-08-18T08:30:30Z","publisher":"Wiley","title":"Contrastive Verbal Guidance: A Beneficial Context for Attention To Events and Their Memory?","issue":"8","quality_controlled":"1","year":"2025","external_id":{"pmid":["40810767"]},"language":[{"iso":"eng"}],"publication":"Cognitive Science","abstract":[{"lang":"eng","text":"Research suggests that presenting an action via multimodal stimulation (verbal and visual) enhances its perception. To highlight this, in most studies, assertive instructions are generally presented before the occurrence of the visual subevent(s). However, verbal instructions need not always be assertive; they can also include negation to contrast the present event with a prior one, thereby facilitating processing—a phenomenon known as contextual facilitation. In our study, we investigated whether using negation to guide an action sequence facilitates action perception, particularly when two consecutive subactions contrast with each other. Stimuli from previous studies on action demonstration were used to create (non)contrastive actions, that is, a ball following noncontrastive and identical (Over–Over or Under–Under) versus contrastive and opposite paths (Over–Under or Under–Over) before terminating at a goal location. In Experiment 1, either an assertive or a negative instruction was provided as verbal guidance before onset of each path. 
Analyzing data from 35 participants, we found that, whereas assertive instructions facilitate overall action recall, negating the later path for contrastive actions is equally facilitative. Given that action goal is the most salient aspect in event memory due to goal-path bias in attention, a second experiment was conducted to test the effect of multimodal synchrony on goal attention and action memory. Experiment 2 revealed that when instructions overlap with actions, they become more tailored—assertive instructions effectively guide noncontrastive actions, while assertive–negative instruction particularly guides contrastive actions. Both studies suggest that increased attention to the goal leads to coarser perception of midevents, with action-instruction synchrony modulating goal bias in real-time event apprehension to serve distinct purposes for action conceptualization. Whereas presenting instructions before subactions attenuates goal attention, overlapping instructions increase goal attention and reveal the selective roles of assertive and negative instructions in guiding contrastive and noncontrastive actions."}]},{"type":"preprint","status":"public","abstract":[{"lang":"eng","text":"<p>The present article offers an assessment of intra-individual variability in visualattention using the Theory of Visual Attention, which provides a formal framework forquantifying attentional components. We specifically investigated overall attentionalcapacity – that is, the available processing speed – and its distribution, the relativeattentional weight.By reanalyzing a large existing dataset from Tünnermann and Scharlau (2021),we found that across multiple testing days, participants either remained stable within a20 Hz margin or showed consistent improvements in capacity – in some cases triplingtheir initial capacity. 
The weights in response to salient stimuli were remarkablyconsistent.To determine whether increases in capacity reflect pure test-retest effects or arefacilitated by consolidation between days, and to quantify within-day variability, weconducted a second study in which participants completed five self-administeredsessions within a single day. Capacities remained within the same magnitude and didnot show a consistent directional trend. The relative weights exhibited comparativelylittle variation in most participants, akin to the previously analyzed dataset. Further,estimation uncertainty increased with higher capacity values.These results suggest that capacity may be subject to training effects, but thatsuch improvements appear to depend on longer breaks between sessions. This hasimportant implications for individualized assessment: A personal prior could beestimated from a single session to accelerate future estimations, as long as subsequentsessions occur on the same day. Participants with higher capacities may require tailoredexperimentation methods when small to medium effects are of interest, due to increaseduncertainty.</p>"}],"user_id":"38219","department":[{"_id":"424"}],"project":[{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"}],"_id":"61119","language":[{"iso":"eng"}],"publication_status":"published","citation":{"apa":"Banh, N. C., &#38; Scharlau, I. (2025). <i>Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day</i>. Center for Open Science.","short":"N.C. Banh, I. Scharlau, (2025).","bibtex":"@article{Banh_Scharlau_2025, title={Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day}, publisher={Center for Open Science}, author={Banh, Ngoc Chi and Scharlau, Ingrid}, year={2025} }","mla":"Banh, Ngoc Chi, and Ingrid Scharlau. 
<i>Intra-Individual Variability in TVA Attentional Capacity and Weight Distribution: A Reanalysis across Days and an Experiment within-Day</i>. Center for Open Science, 2025.","ieee":"N. C. Banh and I. Scharlau, “Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day.” Center for Open Science, 2025.","chicago":"Banh, Ngoc Chi, and Ingrid Scharlau. “Intra-Individual Variability in TVA Attentional Capacity and Weight Distribution: A Reanalysis across Days and an Experiment within-Day.” Center for Open Science, 2025.","ama":"Banh NC, Scharlau I. Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day. Published online 2025."},"year":"2025","date_created":"2025-09-03T11:30:48Z","author":[{"first_name":"Ngoc Chi","last_name":"Banh","full_name":"Banh, Ngoc Chi"},{"last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451","first_name":"Ingrid"}],"oa":"1","publisher":"Center for Open Science","date_updated":"2025-09-09T12:04:43Z","main_file_link":[{"url":"https://osf.io/preprints/psyarxiv/fzvph","open_access":"1"}],"title":"Intra-individual variability in TVA attentional capacity and weight distribution: A reanalysis across days and an experiment within-day"},{"title":"Manners Matter: Action history guides attention and repair choices during interaction","date_created":"2025-09-24T12:32:52Z","year":"2025","quality_controlled":"1","keyword":["Attention","Action","Repairs","Task model","HRI","Eyemovement"],"language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"This study investigated how action histories – unfolding sequences of actions with objects – provide a context for both attentional allocation and linguistic repair strategies. 
Building on theories of enactive cognition and sensorimotor contingency theory, we experimentally manipulated action sequences (action history) to create either simple or rich “situational models,” and investigated how these models interact with attention and reflect in linguistic processes during human–robot interaction. Participants (N = 30) engaged in a controlled object placement task with a humanoid robot, where the action (manner) information was either provided or omitted. The omission elicited repair behaviors in participants that were in focus of our investigation. For rich models (competing action possibilities) participants demonstrated: a) increased attentional reorientation, reflecting active engagement with the situational model b) preference for restricted repairs, targeting the specific source of trouble in action selection. Conversely, a simple situational model led to more generalized attention patterns and open repair strategies, suggesting weaker constraints on internal processing. These findings highlight how situational structures emerge externally to scaffold internal cognitive processes, with action histories serving as a crucial context for the interface between perception, action, and language. 
We discuss how to implement such a tight loop in the assistance of a system."}],"publication":"IEEE International Conference on Development and Learning (ICDL)","conference":{"end_date":"2025-09-19","location":"Prague","name":"IEEE International Conference on Development and Learning (ICDL)","start_date":"2025-09-15"},"doi":"10.31234/osf.io/yn2we_v1","main_file_link":[{"url":"https://doi.org/10.31234/osf.io/yn2we_v1","open_access":"1"}],"oa":"1","date_updated":"2025-09-24T12:39:25Z","author":[{"last_name":"Singh","orcid":"0000-0002-7789-1521","full_name":"Singh, Amit","id":"91018","first_name":"Amit"},{"orcid":"0000-0002-5676-8233","last_name":"Rohlfing","full_name":"Rohlfing, Katharina J.","id":"50352","first_name":"Katharina J."}],"place":"Prague","citation":{"ama":"Singh A, Rohlfing KJ. Manners Matter: Action history guides attention and repair choices during interaction. In: <i>IEEE International Conference on Development and Learning (ICDL)</i>. ; 2025. doi:<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>","ieee":"A. Singh and K. J. Rohlfing, “Manners Matter: Action history guides attention and repair choices during interaction,” presented at the IEEE International Conference on Development and Learning (ICDL), Prague, 2025, doi: <a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>.","chicago":"Singh, Amit, and Katharina J. Rohlfing. “Manners Matter: Action History Guides Attention and Repair Choices during Interaction.” In <i>IEEE International Conference on Development and Learning (ICDL)</i>.  Prague, 2025. <a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">https://doi.org/10.31234/osf.io/yn2we_v1</a>.","apa":"Singh, A., &#38; Rohlfing, K. J. (2025). Manners Matter: Action history guides attention and repair choices during interaction. <i>IEEE International Conference on Development and Learning (ICDL)</i>. IEEE International Conference on Development and Learning (ICDL), Prague. 
<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">https://doi.org/10.31234/osf.io/yn2we_v1</a>","bibtex":"@inproceedings{Singh_Rohlfing_2025, place={ Prague}, title={Manners Matter: Action history guides attention and repair choices during interaction}, DOI={<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>}, booktitle={IEEE International Conference on Development and Learning (ICDL)}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2025} }","short":"A. Singh, K.J. Rohlfing, in: IEEE International Conference on Development and Learning (ICDL),  Prague, 2025.","mla":"Singh, Amit, and Katharina J. Rohlfing. “Manners Matter: Action History Guides Attention and Repair Choices during Interaction.” <i>IEEE International Conference on Development and Learning (ICDL)</i>, 2025, doi:<a href=\"https://doi.org/10.31234/osf.io/yn2we_v1\">10.31234/osf.io/yn2we_v1</a>."},"publication_status":"published","_id":"61432","project":[{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"}],"department":[{"_id":"749"},{"_id":"660"}],"user_id":"91018","status":"public","type":"conference"},{"publication_status":"published","quality_controlled":"1","citation":{"ieee":"A. Singh and K. J. Rohlfing, “Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action,” presented at the 6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany, Bochum, 2025, doi: <a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">10.17605/OSF.IO/8PR23</a>.","chicago":"Singh, Amit, and Katharina J. Rohlfing. “Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action.” In <i>6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany</i>. Bochum, 2025. <a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">https://doi.org/10.17605/OSF.IO/8PR23</a>.","ama":"Singh A, Rohlfing KJ. 
Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action. In: <i>6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany</i>. ; 2025. doi:<a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">10.17605/OSF.IO/8PR23</a>","bibtex":"@inproceedings{Singh_Rohlfing_2025, place={Bochum}, title={Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action}, DOI={<a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">10.17605/OSF.IO/8PR23</a>}, booktitle={6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2025} }","mla":"Singh, Amit, and Katharina J. Rohlfing. “Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action.” <i>6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany</i>, 2025, doi:<a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">10.17605/OSF.IO/8PR23</a>.","short":"A. Singh, K.J. Rohlfing, in: 6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany, Bochum, 2025.","apa":"Singh, A., &#38; Rohlfing, K. J. (2025). Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action. <i>6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany</i>. 6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany, Bochum. 
<a href=\"https://doi.org/10.17605/OSF.IO/8PR23\">https://doi.org/10.17605/OSF.IO/8PR23</a>"},"place":"Bochum","year":"2025","author":[{"orcid":"0000-0002-7789-1521","last_name":"Singh","id":"91018","full_name":"Singh, Amit","first_name":"Amit"},{"orcid":"0000-0002-5676-8233","last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina J.","first_name":"Katharina J."}],"date_created":"2025-09-23T09:04:40Z","date_updated":"2025-09-24T12:47:47Z","main_file_link":[{"url":"https://osf.io/ghymr"}],"conference":{"name":"6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany","start_date":"2025-09-01","end_date":"2025-09-03","location":"Bochum"},"doi":"10.17605/OSF.IO/8PR23","title":"Embedding Psycholinguistics: An Interactive Framework for Studying Language in Action","type":"conference","publication":"6th Biannual Conference of the German Society for Cognitive Science, Bochum, Germany","status":"public","abstract":[{"lang":"eng","text":"We introduce a method to study online language processes in human--robot interactive setup. In this interaction, language mediated eye movements can be studied as the dialogue unfolds between human and a robot.  \r\nTraditionally, real-time linguistic processes are studied using visual world paradigms (VWP), where either the comprehension or the production tasks are implemented on screens for controlled investigations. Going beyond these traditional and unidirectional approach, we bring together production--comprehension loop with the help of a humanoid robot to preserve interactivity in an ecologically valid yet controlled setup. We discuss the potential of such setups for designing and evaluating findings from language--vision interplay in psycholinguistics. Our setup shows a potential to depart from traditional screen based experiments, balancing the dynamics of the interaction with control of the human behaviors. 
"}],"user_id":"91018","department":[{"_id":"749"},{"_id":"660"}],"project":[{"name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog","_id":"115"}],"_id":"61401","language":[{"iso":"eng"}]},{"language":[{"iso":"eng"}],"keyword":["understanding","explaining","explanations","explainable","AI","interdisciplinarity","comprehension","enabledness","agency"],"ddc":["006"],"file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2025-12-01T21:02:20Z","date_created":"2025-12-01T21:02:20Z","creator":"hbuschme","file_size":10114981,"file_id":"62730","access_level":"closed","file_name":"Buschmeier-etal-2025-COGSYS.pdf"}],"abstract":[{"lang":"eng","text":"Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. 
In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed."}],"publication":"Cognitive Systems Research","title":"Forms of Understanding for XAI-Explanations","date_created":"2025-09-08T14:24:32Z","year":"2025","quality_controlled":"1","file_date_updated":"2025-12-01T21:02:20Z","article_number":"101419","article_type":"original","department":[{"_id":"660"}],"user_id":"57578","_id":"61156","project":[{"name":"TRR 318; TP A01: Adaptives Erklären","_id":"111"},{"name":"TRR 318; TP A02: Verstehensprozess einer Erklärung beobachten und auswerten","_id":"112"},{"name":"TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318; TP A04: Integration des technischen Modells in das Partnermodell bei der Erklärung von digitalen Artefakten","_id":"114"},{"_id":"115","name":"TRR 318; TP A05: Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog"},{"_id":"122","name":"TRR 318 - Subproject B3"},{"name":"TRR 318 - Subproject B5","_id":"123"},{"name":"TRR 318 - Project Area Ö","_id":"119"}],"status":"public","type":"journal_article","doi":"10.1016/j.cogsys.2025.101419","main_file_link":[{"open_access":"1","url":"https://www.sciencedirect.com/science/article/pii/S1389041725000993?via%3Dihub"}],"volume":94,"author":[{"orcid":"0000-0002-9613-5713","last_name":"Buschmeier","full_name":"Buschmeier, Hendrik","id":"76456","first_name":"Hendrik"},{"first_name":"Heike M.","id":"27152","full_name":"Buhl, Heike M.","last_name":"Buhl"},{"full_name":"Kern, Friederike","last_name":"Kern","first_name":"Friederike"},{"first_name":"Angela","last_name":"Grimminger","full_name":"Grimminger, Angela","id":"57578"},{"first_name":"Helen","id":"50995","full_name":"Beierling, Helen","last_name":"Beierling"},{"orcid":"0000-0002-9997-9241","last_name":"Fisher","id":"56345","full_name":"Fisher, Josephine Beryl","first_name":"Josephine 
Beryl"},{"first_name":"André","orcid":"0000-0002-9593-7220","last_name":"Groß","id":"93405","full_name":"Groß, André"},{"last_name":"Horwath","full_name":"Horwath, Ilona","id":"68836","first_name":"Ilona"},{"first_name":"Nils","id":"98454","full_name":"Klowait, Nils","orcid":"0000-0002-7347-099X","last_name":"Klowait"},{"orcid":"0009-0009-0892-9483","last_name":"Lazarov","id":"90345","full_name":"Lazarov, Stefan Teodorov","first_name":"Stefan Teodorov"},{"first_name":"Michael","last_name":"Lenke","full_name":"Lenke, Michael"},{"last_name":"Lohmer","full_name":"Lohmer, Vivien","first_name":"Vivien"},{"id":"50352","full_name":"Rohlfing, Katharina","orcid":"0000-0002-5676-8233","last_name":"Rohlfing","first_name":"Katharina"},{"first_name":"Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451"},{"last_name":"Singh","orcid":"0000-0002-7789-1521","id":"91018","full_name":"Singh, Amit","first_name":"Amit"},{"first_name":"Lutz","last_name":"Terfloth","id":"37320","full_name":"Terfloth, Lutz"},{"id":"86589","full_name":"Vollmer, Anna-Lisa","last_name":"Vollmer","first_name":"Anna-Lisa"},{"last_name":"Wang","full_name":"Wang, Yu","first_name":"Yu"},{"last_name":"Wilmes","full_name":"Wilmes, Annedore","first_name":"Annedore"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"date_updated":"2025-12-05T15:32:25Z","oa":"1","intvolume":"        94","citation":{"apa":"Buschmeier, H., Buhl, H. M., Kern, F., Grimminger, A., Beierling, H., Fisher, J. B., Groß, A., Horwath, I., Klowait, N., Lazarov, S. T., Lenke, M., Lohmer, V., Rohlfing, K., Scharlau, I., Singh, A., Terfloth, L., Vollmer, A.-L., Wang, Y., Wilmes, A., &#38; Wrede, B. (2025). Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>, <i>94</i>, Article 101419. <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>","mla":"Buschmeier, Hendrik, et al. 
“Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i>, vol. 94, 101419, 2025, doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","bibtex":"@article{Buschmeier_Buhl_Kern_Grimminger_Beierling_Fisher_Groß_Horwath_Klowait_Lazarov_et al._2025, title={Forms of Understanding for XAI-Explanations}, volume={94}, DOI={<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>}, number={101419}, journal={Cognitive Systems Research}, author={Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and et al.}, year={2025} }","short":"H. Buschmeier, H.M. Buhl, F. Kern, A. Grimminger, H. Beierling, J.B. Fisher, A. Groß, I. Horwath, N. Klowait, S.T. Lazarov, M. Lenke, V. Lohmer, K. Rohlfing, I. Scharlau, A. Singh, L. Terfloth, A.-L. Vollmer, Y. Wang, A. Wilmes, B. Wrede, Cognitive Systems Research 94 (2025).","ieee":"H. Buschmeier <i>et al.</i>, “Forms of Understanding for XAI-Explanations,” <i>Cognitive Systems Research</i>, vol. 94, Art. no. 101419, 2025, doi: <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>.","chicago":"Buschmeier, Hendrik, Heike M. Buhl, Friederike Kern, Angela Grimminger, Helen Beierling, Josephine Beryl Fisher, André Groß, et al. “Forms of Understanding for XAI-Explanations.” <i>Cognitive Systems Research</i> 94 (2025). <a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">https://doi.org/10.1016/j.cogsys.2025.101419</a>.","ama":"Buschmeier H, Buhl HM, Kern F, et al. Forms of Understanding for XAI-Explanations. <i>Cognitive Systems Research</i>. 2025;94. 
doi:<a href=\"https://doi.org/10.1016/j.cogsys.2025.101419\">10.1016/j.cogsys.2025.101419</a>"},"has_accepted_license":"1","publication_status":"published"},{"conference":{"end_date":"2024-03-20","location":"Regensburg","name":"Tagung experimentell arbeitender Psycholog:innen (TeaP)","start_date":"2024-03-17"},"main_file_link":[{"open_access":"1"}],"title":"Effects of task difficulty on visual processing speed","date_created":"2024-03-27T11:43:51Z","author":[{"full_name":"Banh, Ngoc Chi","id":"38219","orcid":"0000-0002-5946-4542","last_name":"Banh","first_name":"Ngoc Chi"},{"first_name":"Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451"}],"oa":"1","date_updated":"2024-06-26T08:02:07Z","citation":{"bibtex":"@inproceedings{Banh_Scharlau_2024, title={Effects of task difficulty on visual processing speed}, author={Banh, Ngoc Chi and Scharlau, Ingrid}, year={2024} }","mla":"Banh, Ngoc Chi, and Ingrid Scharlau. <i>Effects of Task Difficulty on Visual Processing Speed</i>. 2024.","short":"N.C. Banh, I. Scharlau, in: 2024.","apa":"Banh, N. C., &#38; Scharlau, I. (2024). <i>Effects of task difficulty on visual processing speed</i>. Tagung experimentell arbeitender Psycholog:innen (TeaP), Regensburg.","chicago":"Banh, Ngoc Chi, and Ingrid Scharlau. “Effects of Task Difficulty on Visual Processing Speed,” 2024.","ieee":"N. C. Banh and I. Scharlau, “Effects of task difficulty on visual processing speed,” presented at the Tagung experimentell arbeitender Psycholog:innen (TeaP), Regensburg, 2024.","ama":"Banh NC, Scharlau I. Effects of task difficulty on visual processing speed. 
In: ; 2024."},"year":"2024","quality_controlled":"1","has_accepted_license":"1","language":[{"iso":"eng"}],"file_date_updated":"2024-03-27T11:42:20Z","ddc":["150"],"department":[{"_id":"424"},{"_id":"660"}],"user_id":"38219","_id":"53069","project":[{"name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115","grant_number":"438445824"},{"name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing","_id":"52"}],"status":"public","file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2024-03-27T11:42:20Z","creator":"ncbanh","date_created":"2024-03-27T11:42:20Z","file_size":1237859,"access_level":"closed","file_id":"53070","file_name":"Banh & Scharlau (2024) - Effects of task difficulty on visual processing speed.pdf"}],"type":"conference_abstract"},{"abstract":[{"text":"In a successful dialogue in general and a successful explanation in specific, partners need to account for both, the task model (what is relevant for the task) and the partner model (what one can contribute). The phenomenon of coupling between task and the partner model becomes especially interesting in the context of Human–Robot Interaction where humans have to deal with unknown capabilities of the robot, which can momentarily be perceived when the robot is unable to contribute to the task. Following research on the path over manner prominence in an action [31–33], a robot explained actions to a human by emphasizing two aspects – the path (\"where\" component) and the manner (\"how\" component). On critical trials, the robot occasionally omitted one of these components where participants sought missing information for the path or the manner. Participants’ information-seeking and gaze behaviour were analysed. 
Analysis confirms the initial predictions for a) task model (path over manner prominence), i.e., earlier information-seeking for path-missing than manner-missing trials, and b) partner model, i.e., while information-seeking is predominantly tied to the attention on the robot’s face, when robot fails to provide resolution, attention shifts more often towards its torso – a behavior likely to indicate an exploration of the robot’s capabilities. An individual-level analysis further confirms that the intra-individual variation in the task model is partly influenced by the perceived capability of the robot.","lang":"eng"}],"status":"public","type":"conference","publication":"Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)","ddc":["410"],"keyword":["Explanation","Scaffolding","Eyetracking","Partner Model","HRI"],"language":[{"iso":"eng"}],"project":[{"grant_number":"438445824","_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)"}],"_id":"56660","user_id":"91018","department":[{"_id":"749"},{"_id":"660"}],"year":"2024","citation":{"ama":"Singh A, Rohlfing KJ. Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue. In: <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>. ; 2024. doi:<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>","ieee":"A. Singh and K. J. Rohlfing, “Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue,” presented at the 26th ACM International Conference on Multimodal Interaction (ICMI 2024), San Jose, Costa Rica, 2024, doi: <a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>.","chicago":"Singh, Amit, and Katharina J. Rohlfing. 
“Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue.” In <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>, 2024. <a href=\"https://doi.org/10.1145/3686215.3689202\">https://doi.org/10.1145/3686215.3689202</a>.","apa":"Singh, A., &#38; Rohlfing, K. J. (2024). Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue. <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>. 26th ACM International Conference on Multimodal Interaction (ICMI 2024), San Jose, Costa Rica. <a href=\"https://doi.org/10.1145/3686215.3689202\">https://doi.org/10.1145/3686215.3689202</a>","mla":"Singh, Amit, and Katharina J. Rohlfing. “Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue.” <i>Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)</i>, 2024, doi:<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>.","bibtex":"@inproceedings{Singh_Rohlfing_2024, title={Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue}, DOI={<a href=\"https://doi.org/10.1145/3686215.3689202\">10.1145/3686215.3689202</a>}, booktitle={Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024)}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2024} }","short":"A. Singh, K.J. 
Rohlfing, in: Proceedings of 26th ACM International Conference on Multimodal Interaction (ICMI 2024), 2024."},"has_accepted_license":"1","title":"Coupling of Task and Partner Model: Investigating the Intra-Individual Variability in Gaze during Human–Robot Explanatory Dialogue","conference":{"location":"San Jose, Costa Rica","name":"26th ACM International Conference on Multimodal Interaction (ICMI 2024)"},"doi":"10.1145/3686215.3689202","date_updated":"2024-11-06T10:56:34Z","date_created":"2024-10-17T09:35:32Z","author":[{"first_name":"Amit","id":"91018","full_name":"Singh, Amit","last_name":"Singh","orcid":"0000-0002-7789-1521"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing"}]},{"language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Negated statements require more processing efforts than assertions. However, in certain contexts, repeating negations undergo adaptation, which over time mitigates the effort.\r\nHere, we ask whether negations hamper visual processing and whether consecutive repetitions mitigate its influence. \r\nWe assessed the overall attentional capacity and its distribution, the relative weight, quantitatively using \r\nthe formal Theory of Visual Attention (TVA).\r\nWe employed a very simple form for negations, binary negations. Negated instructions, expressing the only alternative to the core supposition, were cognitively demanding, resulting in a loss of attentional capacity in three experiments. The overall attentional capacity recovered gradually but stagnated at a lower level than with assertions, even after many repetitions. Additionally, negations distributed the attention equally between target and reference stimulus. Repetitions slightly increased the reference's share of attention. Assertions, on the other hand, shifted the attentional weight towards the target. 
Few repetitions slightly decreased the bias towards the target, many repetitions increased the bias."}],"publication":"Frontiers in Psychology","title":"Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution","date_created":"2024-03-27T12:16:33Z","year":"2024","article_type":"original","department":[{"_id":"424"},{"_id":"660"}],"user_id":"38219","_id":"53072","project":[{"_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","grant_number":"438445824"},{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"}],"status":"public","type":"journal_article","doi":"10.3389/fpsyg.2024.1451309","main_file_link":[{"open_access":"1","url":"https://www.frontiersin.org/journals/psychology/articles/10.3389/fpsyg.2024.1451309/abstract"}],"volume":15,"author":[{"last_name":"Banh","orcid":"0000-0002-5946-4542","id":"38219","full_name":"Banh, Ngoc Chi","first_name":"Ngoc Chi"},{"last_name":"Tünnermann","full_name":"Tünnermann, Jan","first_name":"Jan"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing"},{"first_name":"Ingrid","last_name":"Scharlau","orcid":"0000-0003-2364-9489","full_name":"Scharlau, Ingrid","id":"451"}],"oa":"1","date_updated":"2024-12-02T09:41:36Z","intvolume":"        15","citation":{"apa":"Banh, N. C., Tünnermann, J., Rohlfing, K. J., &#38; Scharlau, I. (2024). Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution. <i>Frontiers in Psychology</i>, <i>15</i>. <a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">https://doi.org/10.3389/fpsyg.2024.1451309</a>","mla":"Banh, Ngoc Chi, et al. “Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution.” <i>Frontiers in Psychology</i>, vol. 
15, 2024, doi:<a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">10.3389/fpsyg.2024.1451309</a>.","short":"N.C. Banh, J. Tünnermann, K.J. Rohlfing, I. Scharlau, Frontiers in Psychology 15 (2024).","bibtex":"@article{Banh_Tünnermann_Rohlfing_Scharlau_2024, title={Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution}, volume={15}, DOI={<a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">10.3389/fpsyg.2024.1451309</a>}, journal={Frontiers in Psychology}, author={Banh, Ngoc Chi and Tünnermann, Jan and Rohlfing, Katharina J. and Scharlau, Ingrid}, year={2024} }","chicago":"Banh, Ngoc Chi, Jan Tünnermann, Katharina J. Rohlfing, and Ingrid Scharlau. “Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution.” <i>Frontiers in Psychology</i> 15 (2024). <a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">https://doi.org/10.3389/fpsyg.2024.1451309</a>.","ieee":"N. C. Banh, J. Tünnermann, K. J. Rohlfing, and I. Scharlau, “Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution,” <i>Frontiers in Psychology</i>, vol. 15, 2024, doi: <a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">10.3389/fpsyg.2024.1451309</a>.","ama":"Banh NC, Tünnermann J, Rohlfing KJ, Scharlau I. Benefiting from Binary Negations? Verbal Negations Decrease Visual Attention and Balance Its Distribution. <i>Frontiers in Psychology</i>. 2024;15. 
doi:<a href=\"https://doi.org/10.3389/fpsyg.2024.1451309\">10.3389/fpsyg.2024.1451309</a>"},"publication_status":"published"},{"_id":"49516","project":[{"grant_number":"438445824","name":"TRR 318: TRR 318 - Erklärbarkeit konstruieren","_id":"109"},{"name":"TRR 318 - A3: TRR 318 - Subproject A3","_id":"113"},{"name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115","grant_number":"438445824"}],"user_id":"93405","article_type":"original","file_date_updated":"2023-12-07T09:18:55Z","type":"journal_article","status":"public","date_updated":"2023-12-07T12:09:41Z","volume":10,"author":[{"last_name":"Groß","full_name":"Groß, André","first_name":"André"},{"last_name":"Schütze","full_name":"Schütze, Christian","first_name":"Christian"},{"last_name":"Brandt","full_name":"Brandt, Mara","first_name":"Mara"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"},{"last_name":"Richter","full_name":"Richter, Birte","first_name":"Birte"}],"doi":"10.3389/frobt.2023.1245501","has_accepted_license":"1","publication_identifier":{"issn":["2296-9144"]},"publication_status":"published","intvolume":"        10","citation":{"mla":"Groß, André, et al. “RISE: An Open-Source Architecture for Interdisciplinary and Reproducible Human–Robot Interaction Research.” <i>Frontiers in Robotics and AI</i>, vol. 10, Frontiers Media SA, 2023, doi:<a href=\"https://doi.org/10.3389/frobt.2023.1245501\">10.3389/frobt.2023.1245501</a>.","bibtex":"@article{Groß_Schütze_Brandt_Wrede_Richter_2023, title={RISE: an open-source architecture for interdisciplinary and reproducible human–robot interaction research}, volume={10}, DOI={<a href=\"https://doi.org/10.3389/frobt.2023.1245501\">10.3389/frobt.2023.1245501</a>}, journal={Frontiers in Robotics and AI}, publisher={Frontiers Media SA}, author={Groß, André and Schütze, Christian and Brandt, Mara and Wrede, Britta and Richter, Birte}, year={2023} }","short":"A. Groß, C. 
Schütze, M. Brandt, B. Wrede, B. Richter, Frontiers in Robotics and AI 10 (2023).","apa":"Groß, A., Schütze, C., Brandt, M., Wrede, B., &#38; Richter, B. (2023). RISE: an open-source architecture for interdisciplinary and reproducible human–robot interaction research. <i>Frontiers in Robotics and AI</i>, <i>10</i>. <a href=\"https://doi.org/10.3389/frobt.2023.1245501\">https://doi.org/10.3389/frobt.2023.1245501</a>","ieee":"A. Groß, C. Schütze, M. Brandt, B. Wrede, and B. Richter, “RISE: an open-source architecture for interdisciplinary and reproducible human–robot interaction research,” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi: <a href=\"https://doi.org/10.3389/frobt.2023.1245501\">10.3389/frobt.2023.1245501</a>.","chicago":"Groß, André, Christian Schütze, Mara Brandt, Britta Wrede, and Birte Richter. “RISE: An Open-Source Architecture for Interdisciplinary and Reproducible Human–Robot Interaction Research.” <i>Frontiers in Robotics and AI</i> 10 (2023). <a href=\"https://doi.org/10.3389/frobt.2023.1245501\">https://doi.org/10.3389/frobt.2023.1245501</a>.","ama":"Groß A, Schütze C, Brandt M, Wrede B, Richter B. RISE: an open-source architecture for interdisciplinary and reproducible human–robot interaction research. <i>Frontiers in Robotics and AI</i>. 2023;10. doi:<a href=\"https://doi.org/10.3389/frobt.2023.1245501\">10.3389/frobt.2023.1245501</a>"},"keyword":["Artificial Intelligence","Computer Science Applications"],"ddc":["000"],"language":[{"iso":"eng"}],"publication":"Frontiers in Robotics and AI","abstract":[{"text":"<jats:p>In this article, we present RISE—a <jats:bold>R</jats:bold>obotics <jats:bold>I</jats:bold>ntegration and <jats:bold>S</jats:bold>cenario-Management <jats:bold>E</jats:bold>xtensible-Architecture—for designing human–robot dialogs and conducting <jats:italic>Human–Robot Interaction</jats:italic> (HRI) studies. 
In current HRI research, interdisciplinarity in the creation and implementation of interaction studies is becoming increasingly important. In addition, there is a lack of reproducibility of the research results. With the presented open-source architecture, we aim to address these two topics. Therefore, we discuss the advantages and disadvantages of various existing tools from different sub-fields within robotics. Requirements for an architecture can be derived from this overview of the literature, which 1) supports interdisciplinary research, 2) allows reproducibility of the research, and 3) is accessible to other researchers in the field of HRI. With our architecture, we tackle these requirements by providing a <jats:italic>Graphical User Interface</jats:italic> which explains the robot behavior and allows introspection into the current state of the dialog. Additionally, it offers controlling possibilities to easily conduct <jats:italic>Wizard of Oz</jats:italic> studies. To achieve transparency, the dialog is modeled explicitly, and the robot behavior can be configured. 
Furthermore, the modular architecture offers an interface for external features and sensors and is expandable to new robots and modalities.</jats:p>","lang":"eng"}],"file":[{"content_type":"application/pdf","relation":"main_file","success":1,"creator":"angross","date_created":"2023-12-07T09:18:55Z","date_updated":"2023-12-07T09:18:55Z","file_id":"49517","access_level":"closed","file_name":"frobt-10-1245501.pdf","file_size":40679118}],"publisher":"Frontiers Media SA","date_created":"2023-12-07T09:17:09Z","title":"RISE: an open-source architecture for interdisciplinary and reproducible human–robot interaction research","year":"2023"},{"publisher":"MDPI AG","date_updated":"2024-02-26T08:44:32Z","author":[{"first_name":"Birte","full_name":"Richter, Birte","last_name":"Richter"},{"full_name":"Putze, Felix","last_name":"Putze","first_name":"Felix"},{"full_name":"Ivucic, Gabriel","last_name":"Ivucic","first_name":"Gabriel"},{"last_name":"Brandt","full_name":"Brandt, Mara","first_name":"Mara"},{"first_name":"Christian","full_name":"Schütze, Christian","last_name":"Schütze"},{"last_name":"Reisenhofer","full_name":"Reisenhofer, Rafael","first_name":"Rafael"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"},{"last_name":"Schultz","full_name":"Schultz, Tanja","first_name":"Tanja"}],"date_created":"2024-02-18T10:45:53Z","volume":7,"title":"EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study","doi":"10.3390/mti7040037","publication_status":"published","publication_identifier":{"issn":["2414-4088"]},"issue":"4","year":"2023","citation":{"apa":"Richter, B., Putze, F., Ivucic, G., Brandt, M., Schütze, C., Reisenhofer, R., Wrede, B., &#38; Schultz, T. (2023). EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study. <i>Multimodal Technologies and Interaction</i>, <i>7</i>(4), Article 37. 
<a href=\"https://doi.org/10.3390/mti7040037\">https://doi.org/10.3390/mti7040037</a>","short":"B. Richter, F. Putze, G. Ivucic, M. Brandt, C. Schütze, R. Reisenhofer, B. Wrede, T. Schultz, Multimodal Technologies and Interaction 7 (2023).","mla":"Richter, Birte, et al. “EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study.” <i>Multimodal Technologies and Interaction</i>, vol. 7, no. 4, 37, MDPI AG, 2023, doi:<a href=\"https://doi.org/10.3390/mti7040037\">10.3390/mti7040037</a>.","bibtex":"@article{Richter_Putze_Ivucic_Brandt_Schütze_Reisenhofer_Wrede_Schultz_2023, title={EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study}, volume={7}, DOI={<a href=\"https://doi.org/10.3390/mti7040037\">10.3390/mti7040037</a>}, number={437}, journal={Multimodal Technologies and Interaction}, publisher={MDPI AG}, author={Richter, Birte and Putze, Felix and Ivucic, Gabriel and Brandt, Mara and Schütze, Christian and Reisenhofer, Rafael and Wrede, Britta and Schultz, Tanja}, year={2023} }","ama":"Richter B, Putze F, Ivucic G, et al. EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study. <i>Multimodal Technologies and Interaction</i>. 2023;7(4). doi:<a href=\"https://doi.org/10.3390/mti7040037\">10.3390/mti7040037</a>","ieee":"B. Richter <i>et al.</i>, “EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study,” <i>Multimodal Technologies and Interaction</i>, vol. 7, no. 4, Art. no. 37, 2023, doi: <a href=\"https://doi.org/10.3390/mti7040037\">10.3390/mti7040037</a>.","chicago":"Richter, Birte, Felix Putze, Gabriel Ivucic, Mara Brandt, Christian Schütze, Rafael Reisenhofer, Britta Wrede, and Tanja Schultz. “EEG Correlates of Distractions and Hesitations in Human–Robot Interaction: A LabLinking Pilot Study.” <i>Multimodal Technologies and Interaction</i> 7, no. 4 (2023). 
<a href=\"https://doi.org/10.3390/mti7040037\">https://doi.org/10.3390/mti7040037</a>."},"intvolume":"         7","project":[{"_id":"113","name":"TRR 318 - A3: TRR 318 - Subproject A3"},{"_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","grant_number":"438445824"}],"_id":"51371","user_id":"54779","department":[{"_id":"660"}],"article_number":"37","keyword":["Computer Networks and Communications","Computer Science Applications","Human-Computer Interaction","Neuroscience (miscellaneous)"],"language":[{"iso":"eng"}],"type":"journal_article","publication":"Multimodal Technologies and Interaction","abstract":[{"lang":"eng","text":"<jats:p>In this paper, we investigate the effect of distractions and hesitations as a scaffolding strategy. Recent research points to the potential beneficial effects of a speaker’s hesitations on the listeners’ comprehension of utterances, although results from studies on this issue indicate that humans do not make strategic use of them. The role of hesitations and their communicative function in human-human interaction is a much-discussed topic in current research. To better understand the underlying cognitive processes, we developed a human–robot interaction (HRI) setup that allows the measurement of the electroencephalogram (EEG) signals of a human participant while interacting with a robot. We thereby address the research question of whether we find effects on single-trial EEG based on the distraction and the corresponding robot’s hesitation scaffolding strategy. To carry out the experiments, we leverage our LabLinking method, which enables interdisciplinary joint research between remote labs. This study could not have been conducted without LabLinking, as the two involved labs needed to combine their individual expertise and equipment to achieve the goal together. 
The results of our study indicate that the EEG correlates in the distracted condition are different from the baseline condition without distractions. Furthermore, we could differentiate the EEG correlates of distraction with and without a hesitation scaffolding strategy. This proof-of-concept study shows that LabLinking makes it possible to conduct collaborative HRI studies in remote laboratories and lays the first foundation for more in-depth research into robotic scaffolding strategies.</jats:p>"}],"status":"public"},{"quality_controlled":"1","citation":{"short":"N.C. Banh, I. Scharlau, in: S. Merz, C. Frings, B. Leuchtenberg, B. Moeller, S. Mueller, R. Neumann, B. Pastötter, L. Pingen, G. Schui (Eds.), Abstracts of the 65th TeaP, ZPID (Leibniz Institute for Psychology), 2023.","mla":"Banh, Ngoc Chi, and Ingrid Scharlau. “First Steps towards Real-Time Assessment of Attentional Weights and Capacity According to TVA.” <i>Abstracts of the 65th TeaP</i>, edited by Simon Merz et al., ZPID (Leibniz Institute for Psychology), 2023, doi:<a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">10.23668/PSYCHARCHIVES.12945</a>.","bibtex":"@inproceedings{Banh_Scharlau_2023, title={First steps towards real-time assessment of attentional weights and capacity according to TVA}, DOI={<a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">10.23668/PSYCHARCHIVES.12945</a>}, booktitle={Abstracts of the 65th TeaP}, publisher={ZPID (Leibniz Institute for Psychology)}, author={Banh, Ngoc Chi and Scharlau, Ingrid}, editor={Merz, Simon and Frings, Christian and Leuchtenberg, Bettina and Moeller, Birte and Mueller, Stefanie and Neumann, Roland and Pastötter, Bernhard and Pingen, Leah and Schui, Gabriel}, year={2023} }","apa":"Banh, N. C., &#38; Scharlau, I. (2023). First steps towards real-time assessment of attentional weights and capacity according to TVA. In S. Merz, C. Frings, B. Leuchtenberg, B. Moeller, S. Mueller, R. Neumann, B. Pastötter, L. Pingen, &#38; G. 
Schui (Eds.), <i>Abstracts of the 65th TeaP</i>. ZPID (Leibniz Institute for Psychology). <a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">https://doi.org/10.23668/PSYCHARCHIVES.12945</a>","ama":"Banh NC, Scharlau I. First steps towards real-time assessment of attentional weights and capacity according to TVA. In: Merz S, Frings C, Leuchtenberg B, et al., eds. <i>Abstracts of the 65th TeaP</i>. ZPID (Leibniz Institute for Psychology); 2023. doi:<a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">10.23668/PSYCHARCHIVES.12945</a>","ieee":"N. C. Banh and I. Scharlau, “First steps towards real-time assessment of attentional weights and capacity according to TVA,” in <i>Abstracts of the 65th TeaP</i>, Trier, Germany, 2023, doi: <a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">10.23668/PSYCHARCHIVES.12945</a>.","chicago":"Banh, Ngoc Chi, and Ingrid Scharlau. “First Steps towards Real-Time Assessment of Attentional Weights and Capacity According to TVA.” In <i>Abstracts of the 65th TeaP</i>, edited by Simon Merz, Christian Frings, Bettina Leuchtenberg, Birte Moeller, Stefanie Mueller, Roland Neumann, Bernhard Pastötter, Leah Pingen, and Gabriel Schui. ZPID (Leibniz Institute for Psychology), 2023. 
<a href=\"https://doi.org/10.23668/PSYCHARCHIVES.12945\">https://doi.org/10.23668/PSYCHARCHIVES.12945</a>."},"year":"2023","date_created":"2023-08-03T13:10:02Z","author":[{"first_name":"Ngoc Chi","last_name":"Banh","orcid":"0000-0002-5946-4542","full_name":"Banh, Ngoc Chi","id":"38219"},{"full_name":"Scharlau, Ingrid","id":"451","orcid":"0000-0003-2364-9489","last_name":"Scharlau","first_name":"Ingrid"}],"publisher":"ZPID (Leibniz Institute for Psychology)","oa":"1","date_updated":"2024-03-27T10:41:59Z","doi":"10.23668/PSYCHARCHIVES.12945","conference":{"start_date":"2023-03-26","name":"Tagung experimentell arbeitender Psycholog:innen (TeaP)","location":"Trier, Germany","end_date":"2023-03-29"},"main_file_link":[{"url":"https://pada.psycharchives.org/bitstream/3ec340a2-095e-42e2-b998-524856efce07","open_access":"1"}],"title":"First steps towards real-time assessment of attentional weights and capacity according to TVA","publication":"Abstracts of the 65th TeaP","type":"conference_abstract","status":"public","editor":[{"last_name":"Merz","full_name":"Merz, Simon","first_name":"Simon"},{"last_name":"Frings","full_name":"Frings, Christian","first_name":"Christian"},{"last_name":"Leuchtenberg","full_name":"Leuchtenberg, Bettina","first_name":"Bettina"},{"first_name":"Birte","last_name":"Moeller","full_name":"Moeller, Birte"},{"first_name":"Stefanie","full_name":"Mueller, Stefanie","last_name":"Mueller"},{"last_name":"Neumann","full_name":"Neumann, Roland","first_name":"Roland"},{"last_name":"Pastötter","full_name":"Pastötter, Bernhard","first_name":"Bernhard"},{"last_name":"Pingen","full_name":"Pingen, Leah","first_name":"Leah"},{"first_name":"Gabriel","full_name":"Schui, Gabriel","last_name":"Schui"}],"department":[{"_id":"424"}],"user_id":"38219","_id":"46283","project":[{"grant_number":"438445824","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115"},{"_id":"52","name":"PC2: Computing 
Resources Provided by the Paderborn Center for Parallel Computing"}],"language":[{"iso":"eng"}]},{"publication":"Frontiers in Robotics and AI","abstract":[{"lang":"eng","text":"Explanation has been identified as an important capability for AI-based systems, but research on systematic strategies for achieving understanding in interaction with such systems is still sparse. Negation is a linguistic strategy that is often used in explanations. It creates a contrast space between the affirmed and the negated item that enriches explaining processes with additional contextual information. While negation in human speech has been shown to lead to higher processing costs and worse task performance in terms of recall or action execution when used in isolation, it can decrease processing costs when used in context. So far, it has not been considered as a guiding strategy for explanations in human-robot interaction. We conducted an empirical study to investigate the use of negation as a guiding strategy in explanatory human-robot dialogue, in which a virtual robot explains tasks and possible actions to a human explainee to solve them in terms of gestures on a touchscreen. Our results show that negation vs. affirmation 1) increases processing costs measured as reaction time and 2) increases several aspects of task performance. While there was no significant effect of negation on the number of initially correctly executed gestures, we found a significantly lower number of attempts—measured as breaks in the finger movement data before the correct gesture was carried out—when being instructed through a negation. We further found that the gestures significantly resembled the presented prototype gesture more following an instruction with a negation as opposed to an affirmation. Also, the participants rated the benefit of contrastive vs. affirmative explanations significantly higher. 
Repeating the instructions decreased the effects of negation, yielding similar processing costs and task performance measures for negation and affirmation after several iterations. We discuss our results with respect to possible effects of negation on linguistic processing of explanations and limitations of our study."}],"keyword":["HRI","XAI","negation","understanding","explaining","touch interaction","gesture"],"language":[{"iso":"eng"}],"quality_controlled":"1","year":"2023","date_created":"2023-10-30T09:29:16Z","title":"Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue","type":"journal_article","status":"public","project":[{"grant_number":"438445824","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115"}],"_id":"48543","user_id":"38219","department":[{"_id":"749"}],"article_type":"original","funded_apc":"1","publication_status":"published","citation":{"ama":"Groß A, Singh A, Banh NC, et al. Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>. 2023;10. doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>","ieee":"A. Groß <i>et al.</i>, “Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue,” <i>Frontiers in Robotics and AI</i>, vol. 10, 2023, doi: <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>.","chicago":"Groß, A., Amit Singh, Ngoc Chi Banh, B. Richter, Ingrid Scharlau, Katharina J. Rohlfing, and B. Wrede. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i> 10 (2023). <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>.","apa":"Groß, A., Singh, A., Banh, N. C., Richter, B., Scharlau, I., Rohlfing, K. J., &#38; Wrede, B. (2023). 
Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue. <i>Frontiers in Robotics and AI</i>, <i>10</i>. <a href=\"https://doi.org/10.3389/frobt.2023.1236184\">https://doi.org/10.3389/frobt.2023.1236184</a>","short":"A. Groß, A. Singh, N.C. Banh, B. Richter, I. Scharlau, K.J. Rohlfing, B. Wrede, Frontiers in Robotics and AI 10 (2023).","bibtex":"@article{Groß_Singh_Banh_Richter_Scharlau_Rohlfing_Wrede_2023, title={Scaffolding the human partner by contrastive guidance in an explanatory human-robot dialogue}, volume={10}, DOI={<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>}, journal={Frontiers in Robotics and AI}, author={Groß, A. and Singh, Amit and Banh, Ngoc Chi and Richter, B. and Scharlau, Ingrid and Rohlfing, Katharina J. and Wrede, B.}, year={2023} }","mla":"Groß, A., et al. “Scaffolding the Human Partner by Contrastive Guidance in an Explanatory Human-Robot Dialogue.” <i>Frontiers in Robotics and AI</i>, vol. 
10, 2023, doi:<a href=\"https://doi.org/10.3389/frobt.2023.1236184\">10.3389/frobt.2023.1236184</a>."},"intvolume":"        10","oa":"1","date_updated":"2024-06-26T08:01:50Z","author":[{"first_name":"A.","last_name":"Groß","full_name":"Groß, A."},{"orcid":"0000-0002-7789-1521","last_name":"Singh","full_name":"Singh, Amit","id":"91018","first_name":"Amit"},{"first_name":"Ngoc Chi","id":"38219","full_name":"Banh, Ngoc Chi","last_name":"Banh","orcid":"0000-0002-5946-4542"},{"last_name":"Richter","full_name":"Richter, B.","first_name":"B."},{"orcid":"0000-0003-2364-9489","last_name":"Scharlau","full_name":"Scharlau, Ingrid","id":"451","first_name":"Ingrid"},{"last_name":"Rohlfing","full_name":"Rohlfing, Katharina J.","id":"50352","first_name":"Katharina J."},{"first_name":"B.","full_name":"Wrede, B.","last_name":"Wrede"}],"volume":10,"main_file_link":[{"open_access":"1","url":"https://www.frontiersin.org/articles/10.3389/frobt.2023.1236184/full"}],"doi":"10.3389/frobt.2023.1236184"},{"title":"Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall","date_created":"2023-07-15T12:16:42Z","publisher":"Cognitive Science Society","year":"2023","quality_controlled":"1","language":[{"iso":"eng"}],"keyword":["Attention","negation","contrastive  guidance","eye-movements","action understanding","event representation"],"abstract":[{"lang":"eng","text":"<p>The study investigates two different ways of guiding the addressee of an explanation - an explainee, through action demonstration: contrastive and non-contrastive. Their effect was tested on attention to specific action elements (goal) as well as on event memory. In an eye-tracking experiment, participants were shown different motion videos that were either contrastive or non-contrastive with respect to the segments of movement presentation. 
Given that everyday action demonstration is often multimodal, the stimuli were created with respect to their visual and verbal presentation. For visual presentation, a video combined two movements in a contrastive (e.g., Up-motion following a Down-motion) or non-contrastive way (e.g., two Up-motions following each other). For verbal presentation, each video was combined with a sequence of instruction descriptions in the form of negative (i.e., contrastive) or assertive (i.e., non-contrastive) guidance. It was found that a) attention to the event goal increased for this condition in the later time window, and b) participants’ recall of the event was facilitated when a visually contrastive motion was combined with a verbal contrast.</p>"}],"publication":"Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)","conference":{"location":"Sydney","name":"45th Annual Conference of the Cognitive Science Society"},"main_file_link":[{"url":"https://escholarship.org/uc/item/2w94t4cv","open_access":"1"}],"author":[{"first_name":"Amit","full_name":"Singh, Amit","id":"91018","orcid":"0000-0002-7789-1521","last_name":"Singh"},{"first_name":"Katharina J.","id":"50352","full_name":"Rohlfing, Katharina J.","last_name":"Rohlfing"}],"oa":"1","date_updated":"2023-09-27T13:51:42Z","citation":{"ieee":"A. Singh and K. J. Rohlfing, “Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall,” presented at the 45th Annual Conference of the Cognitive Science Society, Sydney, 2023.","chicago":"Singh, Amit, and Katharina J. Rohlfing. “Contrastiveness in the Context of Action Demonstration: An Eye-Tracking Study on Its Effects on Action Perception and Action Recall.” In <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. Sydney, Australia: Cognitive Science Society, 2023.","ama":"Singh A, Rohlfing KJ. 
Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall. In: <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. Cognitive Science Society; 2023.","apa":"Singh, A., &#38; Rohlfing, K. J. (2023). Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall. <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>. 45th Annual Conference of the Cognitive Science Society, Sydney.","mla":"Singh, Amit, and Katharina J. Rohlfing. “Contrastiveness in the Context of Action Demonstration: An Eye-Tracking Study on Its Effects on Action Perception and Action Recall.” <i>Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)</i>, Cognitive Science Society, 2023.","bibtex":"@inproceedings{Singh_Rohlfing_2023, place={Sydney, Australia}, title={Contrastiveness in the context of action demonstration: an eye-tracking study on its effects on action perception and action recall}, booktitle={Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45)}, publisher={Cognitive Science Society}, author={Singh, Amit and Rohlfing, Katharina J.}, year={2023} }","short":"A. Singh, K.J. 
Rohlfing, in: Proceedings of the Annual Meeting of the Cognitive Science Society 45 (45), Cognitive Science Society, Sydney, Australia, 2023."},"place":"Sydney, Australia","related_material":{"record":[{"relation":"contains","id":"46067","status":"public"}]},"publication_status":"published","department":[{"_id":"749"},{"_id":"660"}],"user_id":"91018","_id":"46067","project":[{"grant_number":"438445824","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115"}],"status":"public","popular_science":"1","type":"conference"},{"publication":"KI - Künstliche Intelligenz","abstract":[{"text":"<jats:title>Abstract</jats:title><jats:p>With the perspective on applications of AI-technology, especially data intensive deep learning approaches, the need for methods to control and understand such models has been recognized and gave rise to a new research domain labeled explainable artificial intelligence (XAI). In this overview paper we give an interim appraisal of what has been achieved so far and where there are still gaps in the research. We take an interdisciplinary perspective to identify challenges on XAI research and point to open questions with respect to the quality of the explanations regarding faithfulness and consistency of explanations. On the other hand we see a need regarding the interaction between XAI and user to allow for adaptability to specific information needs and explanatory dialog for informed decision making as well as the possibility to correct models and explanations by interaction. 
This endeavor requires an integrated interdisciplinary perspective and rigorous approaches to empirical evaluation based on psychological, linguistic and even sociological theories.</jats:p>","lang":"eng"}],"keyword":["Artificial Intelligence"],"language":[{"iso":"eng"}],"issue":"3-4","year":"2022","publisher":"Springer Science and Business Media LLC","date_created":"2024-02-14T09:41:56Z","title":"What is Missing in XAI So Far?","type":"journal_article","status":"public","project":[{"name":"TRR 318 - A3: TRR 318 - Subproject A3","_id":"113"},{"grant_number":"438445824","_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)"}],"_id":"51348","user_id":"54779","department":[{"_id":"660"}],"alternative_title":["An Interdisciplinary Perspective"],"publication_status":"published","publication_identifier":{"issn":["0933-1875","1610-1987"]},"citation":{"ieee":"U. Schmid and B. Wrede, “What is Missing in XAI So Far?,” <i>KI - Künstliche Intelligenz</i>, vol. 36, no. 3–4, pp. 303–315, 2022, doi: <a href=\"https://doi.org/10.1007/s13218-022-00786-2\">10.1007/s13218-022-00786-2</a>.","chicago":"Schmid, Ute, and Britta Wrede. “What Is Missing in XAI So Far?” <i>KI - Künstliche Intelligenz</i> 36, no. 3–4 (2022): 303–15. <a href=\"https://doi.org/10.1007/s13218-022-00786-2\">https://doi.org/10.1007/s13218-022-00786-2</a>.","ama":"Schmid U, Wrede B. What is Missing in XAI So Far? <i>KI - Künstliche Intelligenz</i>. 2022;36(3-4):303-315. doi:<a href=\"https://doi.org/10.1007/s13218-022-00786-2\">10.1007/s13218-022-00786-2</a>","apa":"Schmid, U., &#38; Wrede, B. (2022). What is Missing in XAI So Far? <i>KI - Künstliche Intelligenz</i>, <i>36</i>(3–4), 303–315. <a href=\"https://doi.org/10.1007/s13218-022-00786-2\">https://doi.org/10.1007/s13218-022-00786-2</a>","short":"U. Schmid, B. 
Wrede, KI - Künstliche Intelligenz 36 (2022) 303–315.","bibtex":"@article{Schmid_Wrede_2022, title={What is Missing in XAI So Far?}, volume={36}, DOI={<a href=\"https://doi.org/10.1007/s13218-022-00786-2\">10.1007/s13218-022-00786-2</a>}, number={3–4}, journal={KI - Künstliche Intelligenz}, publisher={Springer Science and Business Media LLC}, author={Schmid, Ute and Wrede, Britta}, year={2022}, pages={303–315} }","mla":"Schmid, Ute, and Britta Wrede. “What Is Missing in XAI So Far?” <i>KI - Künstliche Intelligenz</i>, vol. 36, no. 3–4, Springer Science and Business Media LLC, 2022, pp. 303–15, doi:<a href=\"https://doi.org/10.1007/s13218-022-00786-2\">10.1007/s13218-022-00786-2</a>."},"intvolume":"        36","page":"303-315","date_updated":"2024-02-26T08:48:49Z","author":[{"last_name":"Schmid","full_name":"Schmid, Ute","first_name":"Ute"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"}],"volume":36,"doi":"10.1007/s13218-022-00786-2"},{"issue":"3-4","year":"2022","date_created":"2024-02-18T10:03:11Z","publisher":"Springer Science and Business Media LLC","title":"Explainable AI","publication":"KI - Künstliche Intelligenz","language":[{"iso":"eng"}],"keyword":["Artificial Intelligence"],"publication_status":"published","publication_identifier":{"issn":["0933-1875","1610-1987"]},"citation":{"ieee":"U. Schmid and B. Wrede, “Explainable AI,” <i>KI - Künstliche Intelligenz</i>, vol. 36, no. 3–4, pp. 207–210, 2022, doi: <a href=\"https://doi.org/10.1007/s13218-022-00788-0\">10.1007/s13218-022-00788-0</a>.","chicago":"Schmid, Ute, and Britta Wrede. “Explainable AI.” <i>KI - Künstliche Intelligenz</i> 36, no. 3–4 (2022): 207–10. <a href=\"https://doi.org/10.1007/s13218-022-00788-0\">https://doi.org/10.1007/s13218-022-00788-0</a>.","ama":"Schmid U, Wrede B. Explainable AI. <i>KI - Künstliche Intelligenz</i>. 2022;36(3-4):207-210. 
doi:<a href=\"https://doi.org/10.1007/s13218-022-00788-0\">10.1007/s13218-022-00788-0</a>","apa":"Schmid, U., &#38; Wrede, B. (2022). Explainable AI. <i>KI - Künstliche Intelligenz</i>, <i>36</i>(3–4), 207–210. <a href=\"https://doi.org/10.1007/s13218-022-00788-0\">https://doi.org/10.1007/s13218-022-00788-0</a>","mla":"Schmid, Ute, and Britta Wrede. “Explainable AI.” <i>KI - Künstliche Intelligenz</i>, vol. 36, no. 3–4, Springer Science and Business Media LLC, 2022, pp. 207–10, doi:<a href=\"https://doi.org/10.1007/s13218-022-00788-0\">10.1007/s13218-022-00788-0</a>.","short":"U. Schmid, B. Wrede, KI - Künstliche Intelligenz 36 (2022) 207–210.","bibtex":"@article{Schmid_Wrede_2022, title={Explainable AI}, volume={36}, DOI={<a href=\"https://doi.org/10.1007/s13218-022-00788-0\">10.1007/s13218-022-00788-0</a>}, number={3–4}, journal={KI - Künstliche Intelligenz}, publisher={Springer Science and Business Media LLC}, author={Schmid, Ute and Wrede, Britta}, year={2022}, pages={207–210} }"},"page":"207-210","intvolume":"        36","author":[{"first_name":"Ute","last_name":"Schmid","full_name":"Schmid, Ute"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"}],"volume":36,"date_updated":"2024-02-26T08:48:00Z","doi":"10.1007/s13218-022-00788-0","type":"journal_article","status":"public","user_id":"54779","department":[{"_id":"660"}],"project":[{"_id":"113","name":"TRR 318 - A3: TRR 318 - Subproject A3"},{"name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115","grant_number":"438445824"}],"_id":"51366"},{"date_created":"2024-02-14T09:07:53Z","publisher":"Frontiers Media SA","title":"Which “motionese” parameters change with children's age? 
Disentangling attention-getting from action-structuring modifications","year":"2022","language":[{"iso":"eng"}],"keyword":["Social Sciences (miscellaneous)","Communication"],"publication":"Frontiers in Communication","abstract":[{"lang":"eng","text":"<jats:p>Modified action demonstration—dubbed <jats:italic>motionese—</jats:italic>has been proposed as a way to help children recognize the structure and meaning of actions. However, until now, it has been investigated only in young infants. This brief research report presents findings from a cross-sectional study of parental action demonstrations to three groups of 8–11, 12–23, and 24–30-month-old children that applied seven motionese parameters; a second study investigated the youngest group of participants longitudinally to corroborate the cross-sectional results. Results of both studies suggested that four motionese parameters (Motion Pauses, Pace, Velocity, Acceleration) seem to structure the action by organizing it in motion pauses. Whereas these parameters persist over different ages, three other parameters (Demonstration Length, Roundness, and Range) occur predominantly in the younger group and seem to serve to organize infants' attention on the basis of movement. Results are discussed in terms of facilitative vs. pedagogical learning.</jats:p>"}],"author":[{"first_name":"Katharina","id":"50352","full_name":"Rohlfing, Katharina","last_name":"Rohlfing"},{"first_name":"Anna-Lisa","last_name":"Vollmer","full_name":"Vollmer, Anna-Lisa"},{"last_name":"Fritsch","full_name":"Fritsch, Jannik","first_name":"Jannik"},{"first_name":"Britta","last_name":"Wrede","full_name":"Wrede, Britta"}],"volume":7,"date_updated":"2024-02-26T08:53:33Z","doi":"10.3389/fcomm.2022.922405","publication_status":"published","publication_identifier":{"issn":["2297-900X"]},"citation":{"bibtex":"@article{Rohlfing_Vollmer_Fritsch_Wrede_2022, title={Which “motionese” parameters change with children’s age? 
Disentangling attention-getting from action-structuring modifications}, volume={7}, DOI={<a href=\"https://doi.org/10.3389/fcomm.2022.922405\">10.3389/fcomm.2022.922405</a>}, journal={Frontiers in Communication}, publisher={Frontiers Media SA}, author={Rohlfing, Katharina and Vollmer, Anna-Lisa and Fritsch, Jannik and Wrede, Britta}, year={2022} }","short":"K. Rohlfing, A.-L. Vollmer, J. Fritsch, B. Wrede, Frontiers in Communication 7 (2022).","mla":"Rohlfing, Katharina, et al. “Which ‘Motionese’ Parameters Change with Children’s Age? Disentangling Attention-Getting from Action-Structuring Modifications.” <i>Frontiers in Communication</i>, vol. 7, Frontiers Media SA, 2022, doi:<a href=\"https://doi.org/10.3389/fcomm.2022.922405\">10.3389/fcomm.2022.922405</a>.","apa":"Rohlfing, K., Vollmer, A.-L., Fritsch, J., &#38; Wrede, B. (2022). Which “motionese” parameters change with children’s age? Disentangling attention-getting from action-structuring modifications. <i>Frontiers in Communication</i>, <i>7</i>. <a href=\"https://doi.org/10.3389/fcomm.2022.922405\">https://doi.org/10.3389/fcomm.2022.922405</a>","ama":"Rohlfing K, Vollmer A-L, Fritsch J, Wrede B. Which “motionese” parameters change with children’s age? Disentangling attention-getting from action-structuring modifications. <i>Frontiers in Communication</i>. 2022;7. doi:<a href=\"https://doi.org/10.3389/fcomm.2022.922405\">10.3389/fcomm.2022.922405</a>","ieee":"K. Rohlfing, A.-L. Vollmer, J. Fritsch, and B. Wrede, “Which ‘motionese’ parameters change with children’s age? Disentangling attention-getting from action-structuring modifications,” <i>Frontiers in Communication</i>, vol. 7, 2022, doi: <a href=\"https://doi.org/10.3389/fcomm.2022.922405\">10.3389/fcomm.2022.922405</a>.","chicago":"Rohlfing, Katharina, Anna-Lisa Vollmer, Jannik Fritsch, and Britta Wrede. “Which ‘Motionese’ Parameters Change with Children’s Age? 
Disentangling Attention-Getting from Action-Structuring Modifications.” <i>Frontiers in Communication</i> 7 (2022). <a href=\"https://doi.org/10.3389/fcomm.2022.922405\">https://doi.org/10.3389/fcomm.2022.922405</a>."},"intvolume":"         7","user_id":"54779","department":[{"_id":"660"}],"project":[{"_id":"111","name":"TRR 318 - A01: TRR 318 - Adaptives Erklären (Teilprojekt A01)","grant_number":"438445824"},{"_id":"113","name":"TRR 318 - A3: TRR 318 - Subproject A3"},{"name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)","_id":"115","grant_number":"438445824"}],"_id":"51344","type":"journal_article","status":"public"},{"type":"conference","publication":"INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION","status":"public","project":[{"grant_number":"438445824","_id":"115","name":"TRR 318 - A05: TRR 318 - Echtzeitmessung der Aufmerksamkeit im Mensch-Roboter-Erklärdialog (Teilprojekt A05)"},{"name":"TRR 318 - A3: TRR 318 - Subproject A3","_id":"113"}],"_id":"51346","user_id":"54779","department":[{"_id":"660"}],"language":[{"iso":"eng"}],"publication_status":"published","year":"2022","citation":{"mla":"Groß, André, et al. “An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems.” <i>INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION</i>, ACM, 2022, pp. 154–59, doi:<a href=\"https://doi.org/10.1145/3536220.3558070\">10.1145/3536220.3558070</a>.","short":"A. Groß, C. Schütze, B. Wrede, B. Richter, in: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, ACM, 2022, pp. 
154–159.","bibtex":"@inproceedings{Groß_Schütze_Wrede_Richter_2022, title={An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems}, DOI={<a href=\"https://doi.org/10.1145/3536220.3558070\">10.1145/3536220.3558070</a>}, booktitle={INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION}, publisher={ACM}, author={Groß, André and Schütze, Christian and Wrede, Britta and Richter, Birte}, year={2022}, pages={154–159} }","apa":"Groß, A., Schütze, C., Wrede, B., &#38; Richter, B. (2022). An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems. <i>INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION</i>, 154–159. <a href=\"https://doi.org/10.1145/3536220.3558070\">https://doi.org/10.1145/3536220.3558070</a>","ieee":"A. Groß, C. Schütze, B. Wrede, and B. Richter, “An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems,” in <i>INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION</i>, 2022, pp. 154–159, doi: <a href=\"https://doi.org/10.1145/3536220.3558070\">10.1145/3536220.3558070</a>.","chicago":"Groß, André, Christian Schütze, Britta Wrede, and Birte Richter. “An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems.” In <i>INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION</i>, 154–59. ACM, 2022. <a href=\"https://doi.org/10.1145/3536220.3558070\">https://doi.org/10.1145/3536220.3558070</a>.","ama":"Groß A, Schütze C, Wrede B, Richter B. An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems. In: <i>INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION</i>. ACM; 2022:154-159. 
doi:<a href=\"https://doi.org/10.1145/3536220.3558070\">10.1145/3536220.3558070</a>"},"page":"154-159","date_updated":"2024-02-26T08:52:52Z","publisher":"ACM","date_created":"2024-02-14T09:28:57Z","author":[{"orcid":"0000-0002-9593-7220","last_name":"Groß","full_name":"Groß, André","id":"93405","first_name":"André"},{"full_name":"Schütze, Christian","last_name":"Schütze","first_name":"Christian"},{"first_name":"Britta","full_name":"Wrede, Britta","last_name":"Wrede"},{"last_name":"Richter","full_name":"Richter, Birte","first_name":"Birte"}],"title":"An Architecture Supporting Configurable Autonomous Multimodal Joint-Attention-Therapy for Various Robotic Systems","doi":"10.1145/3536220.3558070"}]
