@inproceedings{64827,
  author       = {{Porwol, Philip Fabian and Körber, Miriam and Kern, Friederike and Schulte, Carsten and Scharlau, Ingrid}},
  booktitle    = {{Proceedings of the 3rd TRR 318 Conference: Contextualizing Explanations}},
  editor       = {{Cimiano, Philip and Paaßen, Benjamin and Vollmer, Anna-Lisa}},
  location     = {{Bielefeld}},
  publisher    = {{Bielefeld University Press}},
  title        = {{{Framing what and how to think: Lay people’s metaphors for algorithms}}},
  doi          = {{10.64136/ubio9074}},
  year         = {{2026}},
}

@unpublished{59839,
  abstract     = {{In many scientific approaches, especially in those that try to foster the explainability of artificial intelligences, a narrow conception of explaining prevails. This narrow conception implies that explaining is a one-directional action in which knowledge is transferred from the explainer to an addressee. By studying the amount of agency in metaphors for explaining in scientific texts, we want to find out – or at least contribute a partial answer to the question – why this narrow conception is so dominant. For our analysis, we use a linguistic conception of agency, transitivity. This concept allows one to specify the degree of agency or effectiveness of the action in a verbalised event. It is defined by several component parts. We detail and discuss both these individual parameters and global transitivity. Overall, explaining metaphors show a rather common transitivity pattern: agency is not high and is reduced in characteristic aspects. The metaphors imply that the object of explaining is static, i.e., is not changed within the explanation, and that explaining is the activity of one person only. This pattern may account for the narrow conception of explaining. It contrasts strongly with current co-constructive or sociotechnical approaches to explainability.}},
  author       = {{Scharlau, Ingrid and Rohlfing, Katharina J.}},
  publisher    = {{Center for Open Science}},
  title        = {{{Agency in metaphors of explaining: An analysis of scientific texts}}},
  year         = {{2025}},
}

@misc{59922,
  author       = {{Porwol, Philip Fabian and Scharlau, Ingrid}},
  publisher    = {{OSF}},
  title        = {{{An annotated corpus of elicited metaphors of explaining and understanding using MIPVU}}},
  doi          = {{10.17605/OSF.IO/Y6SMX}},
  year         = {{2025}},
}

@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  note         = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inproceedings{61190,
  author       = {{Sengupta, Meghdut and Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke and Ghosh, Debanjan and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2025}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating the Impact of Conceptual Metaphors on LLM-based NLI through Shapley Interactions}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@misc{59921,
  author       = {{Scharlau, Ingrid and Körber, Miriam}},
  publisher    = {{OSF}},
  title        = {{{Metaphors in 24 WIRED Level 5 Videos (Data corpus)}}},
  doi          = {{10.17605/OSF.IO/94A2J}},
  year         = {{2025}},
}

@article{61244,
  abstract     = {{Explanations play a crucial role in knowledge transfer and meaning-making and are often described as a co-constructive process in which multiple agents collaboratively shape understanding. However, the metaphors used to conceptualize explaining may influence how this process is framed. This study investigates the extent to which the co-constructive nature of explaining is represented in explaining metaphors. Using a systematic analysis of agency, we examined how these metaphors depict the explanation process and the roles of the agents involved. We found that explaining metaphors lack collaboration between explainer and addressee, constructiveness of the process, as well as bidirectionality and iterativeness. In light of current research on metaphorical framing, the study thus highlights the risk that such explaining metaphors may reinforce a non-co-constructive perspective on explaining and a top-down approach in the development of AI systems as well as other areas.}},
  author       = {{Porwol, Philip Fabian and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  title        = {{{Is explaining more like showing or more like building? Agency in metaphors of explaining}}},
  doi          = {{10.3389/fpsyg.2025.1628706}},
  year         = {{2025}},
}

@article{62932,
  abstract     = {{Many previous studies on the conceptual function of metaphors have focused on their function of highlighting aspects of target concepts. From the beginning of this research, it was known that conceptual metaphors also hide aspects of the target concept; however, this aspect has been less studied. This study builds upon the idea that the hiding aspect of a specific metaphor should be identified in relation to other metaphors for the same concept. A method based on the theory of semantic frames and the FrameNet resource is presented to detail this relation and identify the hidden aspects, and it is applied to a corpus of 298 elicited metaphor texts on the target concept of understanding. The analysis revealed that certain conceptual aspects are consistently hidden by a majority of metaphors, pointing to patterns in conceptualization. Using this approach, six aspects frequently hidden by metaphors were identified: Sociality, Transfer, Ownership, Perception, Foundation and Duration.}},
  author       = {{Porwol, Philip Fabian and Scharlau, Ingrid}},
  journal      = {{STUDIA NEOFILOLOGICZNE: ROZPRAWY JĘZYKOZNAWCZE (Modern Language Studies: Linguistic Essays)}},
  pages        = {{181--198}},
  publisher    = {{Uniwersytet Jana Długosza w Częstochowie}},
  title        = {{{What do metaphors of understanding hide?}}},
  volume       = {{XXI}},
  year         = {{2025}},
}

@inproceedings{55338,
  abstract     = {{Metaphorical language is a pivotal element in the realm of political framing. Existing work from linguistics and the social sciences provides compelling evidence regarding the distinctiveness of conceptual framing for political ideology perspectives. However, the nature and utilization of metaphors and their effect on audiences of different political ideologies within political discourses are hardly explored. To enable research in this direction, in this work we create a dataset, originally based on news editorials and labeled with their persuasive effects on liberals and conservatives, and extend it with annotations pertaining to metaphorical usage of language. To that end, first, we identify all single metaphors and composite metaphors. Secondly, we provide annotations of the source and target domains for each metaphor. As a result, our corpus consists of 300 news editorials annotated with spans of text containing metaphors and the corresponding domains from which these metaphors draw. Our analysis shows that liberal readers are affected by metaphors, whereas conservatives are resistant to them. Both ideologies are affected differently based on the metaphor source and target category. For example, liberals are affected by metaphors in the Darkness {&} Light (e.g., death) source domains, whereas the source domain of Nature affects conservatives more significantly.}},
  author       = {{Sengupta, Meghdut and El Baff, Roxanne and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)}},
  editor       = {{Duh, Kevin and Gomez, Helena and Bethard, Steven}},
  pages        = {{3621--3631}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Analyzing the Use of Metaphors in News Editorials for Political Framing}}},
  year         = {{2024}},
}

@article{58109,
  abstract     = {{The present study aims to understand how metaphors are used in explanations. According to many current theories, metaphors have a conceptual function for the understanding of abstract objects. From this theoretical assumption, we derived the hypothesis that the lower the expertise of the addressee of an explanation, the more metaphors should be used. We tested this hypothesis on a relatively natural data set of 24 published videos with close to 100,000 words overall in which experts explain abstract, mostly scientific concepts to persons of different expertise, varying from minimal (children) to profound (expert). Contrary to our expectations, the frequency of metaphors did not decrease with expertise, but actually increased. This increase could be statistically substantiated with higher differences in expertise. The study contributes to a better understanding of the use of metaphors in actual explanatory processes and how metaphor use depends on contextual factors. It thus supports the expansion of the conceptual and linguistic perspective on metaphors to include the aspect of how metaphors are used by speakers.}},
  author       = {{Scharlau, Ingrid and Körber, Miriam and Sengupta, Meghdut and Wachsmuth, Henning}},
  journal      = {{Frontiers in Language Sciences}},
  keywords     = {{metaphor, conceptual metaphor, conceptual metaphor theory, metaphor usage, explaining, explanation}},
  pages        = {{1474924}},
  title        = {{{When to use a metaphor: Metaphors in dialogical explanations with addressees of different expertise}}},
  volume       = {{3}},
  year         = {{2024}},
}

@inproceedings{55406,
  abstract     = {{Metaphorical language, such as {“}spending time together{”}, projects meaning from a source domain (here, $money$) to a target domain ($time$). Thereby, it highlights certain aspects of the target domain, such as the $effort$ behind the time investment. Highlighting aspects with metaphors (while hiding others) bridges the two domains and is the core of metaphorical meaning construction. For metaphor interpretation, linguistic theories stress that identifying the highlighted aspects is important for a better understanding of metaphors. However, metaphor research in NLP has not yet dealt with the phenomenon of highlighting. In this paper, we introduce the task of identifying the main aspect highlighted in a metaphorical sentence. Given the inherent interaction of source domains and highlighted aspects, we propose two multitask approaches, a joint learning approach and a continual learning approach, based on a finetuned contrastive learning model to jointly predict highlighted aspects and source domains. We further investigate whether (predicted) information about a source domain leads to better performance in predicting the highlighted aspects, and vice versa. Our experiments on an existing corpus suggest that, given information about one of the two, the accuracy of predicting the other, for both highlighted aspects and source domains, improves notably compared to the single-task baselines.}},
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Scharlau, Ingrid and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2023}},
  editor       = {{Bouamor, Houda and Pino, Juan and Bali, Kalika}},
  pages        = {{4636--4659}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Modeling Highlighting of Metaphors in Multitask Contrastive Learning Paradigms}}},
  doi          = {{10.18653/v1/2023.findings-emnlp.308}},
  year         = {{2023}},
}

@inproceedings{34067,
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2022 Workshop on Figurative Language Processing}},
  title        = {{{Back to the Roots: Predicting the Source Domain of Metaphors using Contrastive Learning}}},
  year         = {{2022}},
}

