@inproceedings{55338,
  abstract     = {{Metaphorical language is a pivotal element in the realm of political framing. Existing work from linguistics and the social sciences provides compelling evidence regarding the distinctiveness of conceptual framing for political ideology perspectives. However, the nature and utilization of metaphors and the effect on audiences of different political ideologies within political discourses are hardly explored. To enable research in this direction, in this work we create a dataset, originally based on news editorials and labeled with their persuasive effects on liberals and conservatives and extend it with annotations pertaining to metaphorical usage of language. To that end, first, we identify all single metaphors and composite metaphors. Secondly, we provide annotations of the source and target domains for each metaphor. As a result, our corpus consists of 300 news editorials annotated with spans of texts containing metaphors and the corresponding domains of which these metaphors draw from. Our analysis shows that liberal readers are affected by metaphors, whereas conservatives are resistant to them. Both ideologies are affected differently based on the metaphor source and target category. For example, liberals are affected by metaphors in the Darkness {&} Light (e.g., death) source domains, whereas the source domain of Nature affects conservatives more significantly.}},
  author       = {{Sengupta, Meghdut and El Baff, Roxanne and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)}},
  editor       = {{Duh, Kevin and Gomez, Helena and Bethard, Steven}},
  pages        = {{3621--3631}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Analyzing the Use of Metaphors in News Editorials for Political Framing}}},
  year         = {{2024}},
}

@inproceedings{55404,
  abstract     = {{Explanations are pervasive in our lives. Mostly, they occur in dialogical form where an explainer discusses a concept or phenomenon of interest with an explainee. Leaving the explainee with a clear understanding is not straightforward due to the knowledge gap between the two participants. Previous research looked at the interaction of explanation moves, dialogue acts, and topics in successful dialogues with expert explainers. However, daily-life explanations often fail, raising the question of what makes a dialogue successful. In this work, we study explanation dialogues in terms of the interactions between the explainer and explainee and how they correlate with the quality of explanations in terms of a successful understanding on the explainee{’}s side. In particular, we first construct a corpus of 399 dialogues from the Reddit forum {Explain Like I am Five} and annotate it for interaction flows and explanation quality. We then analyze the interaction flows, comparing them to those appearing in expert dialogues. Finally, we encode the interaction flows using two language models that can handle long inputs, and we provide empirical evidence for the effectiveness boost gained through the encoding in predicting the success of explanation dialogues.}},
  author       = {{Alshomary, Milad and Lange, Felix and Booshehri, Meisam and Sengupta, Meghdut and Cimiano, Philipp and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)}},
  editor       = {{Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen}},
  pages        = {{11523--11536}},
  publisher    = {{ELRA and ICCL}},
  title        = {{{Modeling the Quality of Dialogical Explanations}}},
  year         = {{2024}},
}

@inproceedings{55406,
  abstract     = {{Metaphorical language, such as {“}spending time together{”}, projects meaning from a source domain (here, $money$) to a target domain ($time$). Thereby, it highlights certain aspects of the target domain, such as the $effort$ behind the time investment. Highlighting aspects with metaphors (while hiding others) bridges the two domains and is the core of metaphorical meaning construction. For metaphor interpretation, linguistic theories stress that identifying the highlighted aspects is important for a better understanding of metaphors. However, metaphor research in NLP has not yet dealt with the phenomenon of highlighting. In this paper, we introduce the task of identifying the main aspect highlighted in a metaphorical sentence. Given the inherent interaction of source domains and highlighted aspects, we propose two multitask approaches - a joint learning approach and a continual learning approach - based on a finetuned contrastive learning model to jointly predict highlighted aspects and source domains. We further investigate whether (predicted) information about a source domain leads to better performance in predicting the highlighted aspects, and vice versa. Our experiments on an existing corpus suggest that, with the corresponding information, the performance to predict the other improves in terms of model accuracy in predicting highlighted aspects and source domains notably compared to the single-task baselines.}},
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Scharlau, Ingrid and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2023}},
  editor       = {{Bouamor, Houda and Pino, Juan and Bali, Kalika}},
  pages        = {{4636--4659}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Modeling Highlighting of Metaphors in Multitask Contrastive Learning Paradigms}}},
  doi          = {{10.18653/v1/2023.findings-emnlp.308}},
  year         = {{2023}},
}

@inproceedings{58723,
  abstract     = {{In real-world debates, the most common way to counter an argument is to reason against its main point, that is, its conclusion. Existing work on the automatic generation of natural language counter-arguments does not address the relation to the conclusion, possibly because many arguments leave their conclusion implicit. In this paper, we hypothesize that the key to effective counter-argument generation is to explicitly model the argument's conclusion and to ensure that the stance of the generated counter is opposite to that conclusion. In particular, we propose a multitask approach that jointly learns to generate both the conclusion and the counter of an input argument. The approach employs a stance-based ranking component that selects the counter from a diverse set of generated candidates whose stance best opposes the generated conclusion. In both automatic and manual evaluation, we provide evidence that our approach generates more relevant and stance-adhering counters than strong baselines.}},
  author       = {{Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics}},
  editor       = {{Vlachos, Andreas and Augenstein, Isabelle}},
  pages        = {{957--967}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Conclusion-based Counter-Argument Generation}}},
  doi          = {{10.18653/v1/2023.eacl-main.67}},
  year         = {{2023}},
}

@inproceedings{33004,
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  pages        = {{344--354}},
  title        = {{{``Mama Always Had a Way of Explaining Things So I Could Understand'': A Dialogue Corpus for Learning How to Explain}}},
  internal-note = {{possible duplicate of entry 55337 (same authors, venue, and page range, slightly different title) -- verify and merge}},
  year         = {{2022}},
}

@inproceedings{22157,
  author       = {{Kiesel, Johannes and Alshomary, Milad and Handke, Nicolas and Cai, Xiaoni and Wachsmuth, Henning and Stein, Benno}},
  booktitle    = {{Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics}},
  pages        = {{4459--4471}},
  title        = {{{Identifying the Human Values behind Arguments}}},
  year         = {{2022}},
}

@inproceedings{34051,
  abstract     = {{An argument is a constellation of premises reasoning towards a certain conclusion. The automatic generation of conclusions is becoming a very prominent task, raising the need for automatic measures to assess the quality of these generated conclusions. The Shared Task at the 9th Workshop on Argument Mining proposes a new task to assess the novelty and validity of a conclusion given a set of premises. In this paper, we present a multitask learning approach that transfers the knowledge learned from the natural language inference task to the tasks at hand. Evaluation results indicate the importance of both knowledge transfer and joint learning, placing our approach in the fifth place with strong results compared to baselines.}},
  author       = {{Alshomary, Milad and Stahl, Maja}},
  booktitle    = {{Proceedings of the 9th Workshop on Argument Mining}},
  pages        = {{111--114}},
  publisher    = {{International Conference on Computational Linguistics}},
  title        = {{{Argument Novelty and Validity Assessment via Multitask and Transfer Learning}}},
  year         = {{2022}},
}

@inproceedings{55337,
  abstract     = {{As AI is more and more pervasive in everyday life, humans have an increasing demand to understand its behavior and decisions. Most research on explainable AI builds on the premise that there is one ideal explanation to be found. In fact, however, everyday explanations are co-constructed in a dialogue between the person explaining (the explainer) and the specific person being explained to (the explainee). In this paper, we introduce a first corpus of dialogical explanations to enable NLP research on how humans explain as well as on how AI can learn to imitate this process. The corpus consists of 65 transcribed English dialogues from the Wired video series 5 Levels, explaining 13 topics to five explainees of different proficiency. All 1550 dialogue turns have been manually labeled by five independent professionals for the topic discussed as well as for the dialogue act and the explanation move performed. We analyze linguistic patterns of explainers and explainees, and we explore differences across proficiency levels. BERT-based baseline results indicate that sequence information helps predicting topics, acts, and moves effectively.}},
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  editor       = {{Calzolari, Nicoletta and Huang, Chu-Ren and Kim, Hansaem and Pustejovsky, James and Wanner, Leo and Choi, Key-Sun and Ryu, Pum-Mo and Chen, Hsin-Hsi and Donatelli, Lucia and Ji, Heng and Kurohashi, Sadao and Paggio, Patrizia and Xue, Nianwen and Kim, Seokhwan and Hahm, Younggyun and He, Zhong and Lee, Tony Kyungil and Santus, Enrico and Bond, Francis and Na, Seung-Hoon}},
  pages        = {{344--354}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{“Mama Always Had a Way of Explaining Things So I Could Understand”: A Dialogue Corpus for Learning to Construct Explanations}}},
  year         = {{2022}},
}

% NOTE(review): entry is missing pages and publisher -- presumably available in the
% ACL Anthology record for the 2022 Figurative Language Processing workshop; TODO confirm and add.
@inproceedings{34067,
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2022 Workshop on Figurative Language Processing}},
  title        = {{{Back to the Roots: Predicting the Source Domain of Metaphors using Contrastive Learning}}},
  year         = {{2022}},
}

@inproceedings{32247,
  author       = {{Alshomary, Milad and Rieskamp, Jonas and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 9th International Conference on Computational Models of Argument}},
  pages        = {{21--31}},
  title        = {{{Generating Contrastive Snippets for Argument Search}}},
  doi          = {{10.3233/FAIA220138}},
  year         = {{2022}},
}

@inproceedings{30840,
  author       = {{Alshomary, Milad and El Baff, Roxanne and Gurcke, Timon and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics}},
  pages        = {{8782--8797}},
  title        = {{{The Moral Debater: A Study on the Computational Generation of Morally Framed Arguments}}},
  year         = {{2022}},
}

@inproceedings{22158,
  author       = {{Syed, Shahbaz and Al-Khatib, Khalid and Alshomary, Milad and Wachsmuth, Henning and Potthast, Martin}},
  booktitle    = {{Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021): Findings}},
  pages        = {{3482--3493}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Generating Informative Conclusions for Argumentative Texts}}},
  year         = {{2021}},
}

% NOTE(review): journal article is missing pages/eid and doi -- Patterns 2(6), 2021;
% TODO look up the article number and DOI in the journal record and add them.
@article{22449,
  author       = {{Alshomary, Milad and Wachsmuth, Henning}},
  journal      = {{Patterns}},
  number       = {{6}},
  title        = {{{Toward Audience-aware Argument Generation}}},
  volume       = {{2}},
  year         = {{2021}},
}

@inproceedings{25297,
  author       = {{Alshomary, Milad and Gurcke, Timon and Syed, Shahbaz and Heinisch, Philipp and Spliethöver, Maximilian and Cimiano, Philipp and Potthast, Martin and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 8th Workshop on Argument Mining}},
  pages        = {{184--189}},
  title        = {{{Key Point Analysis via Contrastive Learning and Extractive Argument Summarization}}},
  year         = {{2021}},
}

@inproceedings{25295,
  author       = {{Gurcke, Timon and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 8th Workshop on Argument Mining}},
  pages        = {{67--77}},
  title        = {{{Assessing the Sufficiency of Arguments through Conclusion Generation}}},
  year         = {{2021}},
}

@inproceedings{22229,
  author       = {{Alshomary, Milad and Syed, Shahbaz and Potthast, Martin and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021): Findings}},
  location     = {{Online}},
  pages        = {{1816--1827}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Argument Undermining: Counter-Argument Generation by Attacking Weak Premises}}},
  doi          = {{10.18653/v1/2021.findings-acl.159}},
  year         = {{2021}},
}

@inproceedings{21178,
  abstract     = {{When engaging in argumentative discourse, skilled human debaters tailor claims to the beliefs of the audience, to construct effective arguments. Recently, the field of computational argumentation witnessed extensive effort to address the automatic generation of arguments. However, existing approaches do not perform any audience-specific adaptation. In this work, we aim to bridge this gap by studying the task of belief-based claim generation: Given a controversial topic and a set of beliefs, generate an argumentative claim tailored to the beliefs. To tackle this task, we model the people's prior beliefs through their stances on controversial topics and extend state-of-the-art text generation models to generate claims conditioned on the beliefs. Our automatic evaluation confirms the ability of our approach to adapt claims to a set of given beliefs. In a manual study, we additionally evaluate the generated claims in terms of informativeness and their likelihood to be uttered by someone with a respective belief. Our results reveal the limitations of modeling users' beliefs based on their stances, but demonstrate the potential of encoding beliefs into argumentative texts, laying the ground for future exploration of audience reach.}},
  author       = {{Alshomary, Milad and Chen, Wei-Fan and Gurcke, Timon and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume}},
  location     = {{Online}},
  pages        = {{224--233}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Belief-based Generation of Argumentative Claims}}},
  doi          = {{10.18653/v1/2021.eacl-main.17}},
  year         = {{2021}},
}

@inproceedings{7283,
  author       = {{Alshomary, Milad and Düsterhus, Nick and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval}},
  location     = {{Xi'an, China}},
  pages        = {{1969--1972}},
  title        = {{{Extractive Snippet Generation for Arguments}}},
  year         = {{2020}},
}

@inproceedings{16868,
  author       = {{Alshomary, Milad and Syed, Shahbaz and Potthast, Martin and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020)}},
  location     = {{Seattle, USA}},
  pages        = {{4334--4345}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Target Inference in Argument Conclusion Generation}}},
  year         = {{2020}},
}

@inproceedings{12931,
  author       = {{Ajjour, Yamen and Alshomary, Milad and Wachsmuth, Henning and Stein, Benno}},
  booktitle    = {{Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing}},
  pages        = {{2915--2925}},
  title        = {{{Modeling Frames in Argumentation}}},
  year         = {{2019}},
}

