@inproceedings{61190,
  author       = {Sengupta, Meghdut and Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and H{\"u}llermeier, Eyke and Ghosh, Debanjan and Wachsmuth, Henning},
  booktitle    = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
  publisher    = {Association for Computational Linguistics},
  title        = {Investigating the Impact of Conceptual Metaphors on {LLM}-based {NLI} through {Shapley} Interactions},
  note         = {Accepted; to appear},
  year         = {2025},
}

@inproceedings{55338,
  abstract     = {Metaphorical language is a pivotal element in the realm of political framing. Existing work from linguistics and the social sciences provides compelling evidence regarding the distinctiveness of conceptual framing for political ideology perspectives. However, the nature and utilization of metaphors and the effect on audiences of different political ideologies within political discourses are hardly explored. To enable research in this direction, in this work we create a dataset, originally based on news editorials and labeled with their persuasive effects on liberals and conservatives and extend it with annotations pertaining to metaphorical usage of language. To that end, first, we identify all single metaphors and composite metaphors. Secondly, we provide annotations of the source and target domains for each metaphor. As a result, our corpus consists of 300 news editorials annotated with spans of texts containing metaphors and the corresponding domains of which these metaphors draw from. Our analysis shows that liberal readers are affected by metaphors, whereas conservatives are resistant to them. Both ideologies are affected differently based on the metaphor source and target category. For example, liberals are affected by metaphors in the Darkness \& Light (e.g., death) source domains, whereas the source domain of Nature affects conservatives more significantly.},
  author       = {Sengupta, Meghdut and El Baff, Roxanne and Alshomary, Milad and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  editor       = {Duh, Kevin and Gomez, Helena and Bethard, Steven},
  pages        = {3621--3631},
  publisher    = {Association for Computational Linguistics},
  title        = {Analyzing the Use of Metaphors in News Editorials for Political Framing},
  year         = {2024},
}

@inproceedings{55404,
  abstract     = {Explanations are pervasive in our lives. Mostly, they occur in dialogical form where an explainer discusses a concept or phenomenon of interest with an explainee. Leaving the explainee with a clear understanding is not straightforward due to the knowledge gap between the two participants. Previous research looked at the interaction of explanation moves, dialogue acts, and topics in successful dialogues with expert explainers. However, daily-life explanations often fail, raising the question of what makes a dialogue successful. In this work, we study explanation dialogues in terms of the interactions between the explainer and explainee and how they correlate with the quality of explanations in terms of a successful understanding on the explainee's side. In particular, we first construct a corpus of 399 dialogues from the Reddit forum ``Explain Like I am Five'' and annotate it for interaction flows and explanation quality. We then analyze the interaction flows, comparing them to those appearing in expert dialogues. Finally, we encode the interaction flows using two language models that can handle long inputs, and we provide empirical evidence for the effectiveness boost gained through the encoding in predicting the success of explanation dialogues.},
  author       = {Alshomary, Milad and Lange, Felix and Booshehri, Meisam and Sengupta, Meghdut and Cimiano, Philipp and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  editor       = {Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen},
  pages        = {11523--11536},
  publisher    = {ELRA and ICCL},
  title        = {Modeling the Quality of Dialogical Explanations},
  year         = {2024},
}

@inproceedings{55406,
  abstract     = {Metaphorical language, such as ``spending time together'', projects meaning from a source domain (here, \emph{money}) to a target domain (\emph{time}). Thereby, it highlights certain aspects of the target domain, such as the \emph{effort} behind the time investment. Highlighting aspects with metaphors (while hiding others) bridges the two domains and is the core of metaphorical meaning construction. For metaphor interpretation, linguistic theories stress that identifying the highlighted aspects is important for a better understanding of metaphors. However, metaphor research in NLP has not yet dealt with the phenomenon of highlighting. In this paper, we introduce the task of identifying the main aspect highlighted in a metaphorical sentence. Given the inherent interaction of source domains and highlighted aspects, we propose two multitask approaches - a joint learning approach and a continual learning approach - based on a finetuned contrastive learning model to jointly predict highlighted aspects and source domains. We further investigate whether (predicted) information about a source domain leads to better performance in predicting the highlighted aspects, and vice versa. Our experiments on an existing corpus suggest that, with the corresponding information, the performance to predict the other improves in terms of model accuracy in predicting highlighted aspects and source domains notably compared to the single-task baselines.},
  author       = {Sengupta, Meghdut and Alshomary, Milad and Scharlau, Ingrid and Wachsmuth, Henning},
  booktitle    = {Findings of the Association for Computational Linguistics: {EMNLP} 2023},
  editor       = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  pages        = {4636--4659},
  publisher    = {Association for Computational Linguistics},
  title        = {Modeling Highlighting of Metaphors in Multitask Contrastive Learning Paradigms},
  doi          = {10.18653/v1/2023.findings-emnlp.308},
  year         = {2023},
}

@inproceedings{34067,
  author       = {Sengupta, Meghdut and Alshomary, Milad and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 2022 Workshop on Figurative Language Processing},
  publisher    = {Association for Computational Linguistics},
  title        = {Back to the Roots: Predicting the Source Domain of Metaphors using Contrastive Learning},
  year         = {2022},
}

