@inbook{61877,
  abstract     = {{Research indicates that anger is a prevalent emotion in human-technology interactions, often leading to frustration, rejection, and reduced trust, which significantly impacts user experience and the acceptance of technology. Particularly in high-risk or uncertain situations, where AI explanations are intended to help users make more informed decisions, decision-making is influenced by emotional factors that impair understanding and lead to suboptimal choices. While XAI research continues to evolve, users’ emotions and individual characteristics deserve greater consideration. Broadening empirical studies in this area could foster a more comprehensive understanding of decision-making processes following explanations, especially regarding the interaction between emotions and cognition. In response, this study employs an experimental design to examine the effects of AI explanations and emotion regulation on the reliance and trust of emotional users. The results provide a foundation for future human-centered research in XAI, focusing on the impact of emotions and cognition in human-technology interactions.}},
  author       = {{Lammert, Olesja}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783032083326}},
  issn         = {{1865-0929}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Can AI Regulate Your Emotions? An Empirical Investigation of the Influence of AI Explanations and Emotion Regulation on Human Decision-Making Factors}}},
  doi          = {{10.1007/978-3-032-08333-3_11}},
  year         = {{2025}},
}

@article{52202,
  author       = {{Lammert, Olesja and Richter, Birte and Schütze, Christian and Thommes, Kirsten and Wrede, Britta}},
  journal      = {{Frontiers in Behavioral Economics}},
  title        = {{{Humans in XAI: Increased Reliance in Decision-Making Under Uncertainty by Using Explanation Strategies}}},
  doi          = {{10.3389/frbhe.2024.1377075}},
  year         = {{2024}},
}

@inproceedings{55177,
  author       = {{Thommes, Kirsten and Lammert, Olesja and Schütze, Christian and Richter, Birte and Wrede, Britta}},
  booktitle    = {{Communications in Computer and Information Science}},
  isbn         = {{9783031638022}},
  issn         = {{1865-0929}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Human Emotions in AI Explanations}}},
  doi          = {{10.1007/978-3-031-63803-9_15}},
  year         = {{2024}},
}

@inproceedings{57250,
  author       = {{Schütze, Christian and Richter, Birte and Lammert, Olesja and Thommes, Kirsten and Wrede, Britta}},
  booktitle    = {{HAI '24: Proceedings of the 12th International Conference on Human-Agent Interaction}},
  isbn         = {{9798400711787}},
  pages        = {{141--149}},
  publisher    = {{ACM}},
  title        = {{{Static Socio-Demographic and Individual Factors for Generating Explanations in XAI: Can They Serve as a Prior in DSS for Adaptation of Explanation Strategies?}}},
  doi          = {{10.1145/3687272.3688300}},
  year         = {{2024}},
}

@inproceedings{55403,
  abstract     = {{In this paper we consider the interactive processes by which an explainer and an explainee cooperate to produce an explanation, which we refer to as co-construction. Explainable Artificial Intelligence (XAI) is concerned with the development of intelligent systems and robots that can explain and justify their actions, decisions, recommendations, and so on. However, the cooperative construction of explanations remains a key but under-explored issue. This short paper proposes an architecture for intelligent systems that promotes a co-constructive and interactive approach to explanation generation. By outlining its basic components and their specific roles, we aim to contribute to the advancement of XAI computational frameworks that actively engage users in the explanation process.}},
  author       = {{Buschmeier, Hendrik and Cimiano, Philipp and Kopp, Stefan and Kornowicz, Jaroslaw and Lammert, Olesja and Matarese, Marco and Mindlin, Dimitry and Robrecht, Amelie Sophie and Vollmer, Anna-Lisa and Wagner, Petra and Wrede, Britta and Booshehri, Meisam}},
  booktitle    = {{Proceedings of the 2024 Workshop on Explainability Engineering}},
  location     = {{Lisbon, Portugal}},
  pages        = {{20--25}},
  publisher    = {{ACM}},
  title        = {{{Towards a Computational Architecture for Co-Constructive Explainable Systems}}},
  doi          = {{10.1145/3648505.3648509}},
  year         = {{2024}},
}

@inproceedings{48285,
  author       = {{Lebedeva, Anastasia and Kornowicz, Jaroslaw and Lammert, Olesja and Papenkordt, Jörg}},
  booktitle    = {{Artificial Intelligence in HCI}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{The Role of Response Time for Algorithm Aversion in Fast and Slow Thinking Tasks}}},
  doi          = {{10.1007/978-3-031-35891-3_9}},
  year         = {{2023}},
}

@inproceedings{48280,
  author       = {{Schütze, Christian and Lammert, Olesja and Richter, Birte and Thommes, Kirsten and Wrede, Britta}},
  booktitle    = {{Artificial Intelligence in HCI}},
  publisher    = {{Springer Nature Switzerland}},
  title        = {{{Emotional Debiasing Explanations for Decisions in HCI}}},
  doi          = {{10.1007/978-3-031-35891-3_20}},
  year         = {{2023}},
}

