@inbook{61222,
  author       = {Lenke, Michael and Klowait, Nils and Biere, Lea and Schulte, Carsten},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783032012210},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Assessing {AI} Literacy: A Systematic Review of Questionnaires with Emphasis on Affective, Behavioral, Cognitive, and Ethical Aspects},
  doi          = {10.1007/978-3-032-01222-7_8},
  year         = {2025},
  internal-note = {booktitle currently holds the series name; the actual LNCS volume title and page range are unknown -- TODO confirm and consider moving LNCS to series},
}

@inproceedings{61225,
  author       = {Lenke, Michael and Lehner, Lukas and Landman, Martina},
  booktitle    = {2025 {IEEE} Global Engineering Education Conference ({EDUCON})},
  publisher    = {IEEE},
  title        = {``{I'm} Actually More Interested in {AI} Than in {Computer Science}'' -- 12-Year-Olds Describing Their First Encounter with {AI}},
  doi          = {10.1109/educon62633.2025.11016657},
  year         = {2025},
}

@inproceedings{61224,
  author       = {Lenke, Michael and Schulte, Carsten},
  booktitle    = {2025 {IEEE} Global Engineering Education Conference ({EDUCON})},
  publisher    = {IEEE},
  title        = {Enhancing {AI} Interaction through {Co-Construction}: A Multi-Faceted Workshop Framework},
  doi          = {10.1109/educon62633.2025.11016326},
  year         = {2025},
}

@article{56190,
  abstract     = {This study investigates the potential of using advanced conversational artificial intelligence (AI) to help people understand complex AI systems. In line with conversation-analytic research, we view the participatory role of AI as dynamically unfolding in a situation rather than being predetermined by its architecture. To study user sensemaking of intransparent AI systems, we set up a naturalistic encounter between human participants and two AI systems developed in-house: a reinforcement learning simulation and a GPT-4-based explainer chatbot. Our results reveal that an explainer-AI only truly functions as such when participants actively engage with it as a co-constructive agent. Both the interface's spatial configuration and the asynchronous temporal nature of the explainer AI -- combined with the users' presuppositions about its role -- contribute to the decision whether to treat the AI as a dialogical co-participant in the interaction. Participants establish evidentiality conventions and sensemaking procedures that may diverge from a system's intended design or function.},
  author       = {Klowait, Nils and Erofeeva, Maria and Lenke, Michael and Horwath, Ilona and Buschmeier, Hendrik},
  journal      = {Discourse \& Communication},
  number       = {6},
  pages        = {917--930},
  publisher    = {Sage},
  title        = {Can {AI} explain {AI}? Interactive co-construction of explanations among human and artificial agents},
  doi          = {10.1177/17504813241267069},
  volume       = {18},
  year         = {2024},
}

