@inproceedings{55338,
  abstract     = {{Metaphorical language is a pivotal element in the realm of political framing. Existing work from linguistics and the social sciences provides compelling evidence regarding the distinctiveness of conceptual framing for political ideology perspectives. However, the nature and utilization of metaphors and the effect on audiences of different political ideologies within political discourses are hardly explored. To enable research in this direction, in this work we create a dataset, originally based on news editorials and labeled with their persuasive effects on liberals and conservatives and extend it with annotations pertaining to metaphorical usage of language. To that end, first, we identify all single metaphors and composite metaphors. Secondly, we provide annotations of the source and target domains for each metaphor. As a result, our corpus consists of 300 news editorials annotated with spans of texts containing metaphors and the corresponding domains of which these metaphors draw from. Our analysis shows that liberal readers are affected by metaphors, whereas conservatives are resistant to them. Both ideologies are affected differently based on the metaphor source and target category. For example, liberals are affected by metaphors in the Darkness \& Light (e.g., death) source domains, whereas the source domain of Nature affects conservatives more significantly.}},
  author       = {{Sengupta, Meghdut and El Baff, Roxanne and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)}},
  editor       = {{Duh, Kevin and Gomez, Helena and Bethard, Steven}},
  pages        = {{3621--3631}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Analyzing the Use of Metaphors in News Editorials for Political Framing}}},
  year         = {{2024}},
}

@inproceedings{55404,
  abstract     = {{Explanations are pervasive in our lives. Mostly, they occur in dialogical form where an explainer discusses a concept or phenomenon of interest with an explainee. Leaving the explainee with a clear understanding is not straightforward due to the knowledge gap between the two participants. Previous research looked at the interaction of explanation moves, dialogue acts, and topics in successful dialogues with expert explainers. However, daily-life explanations often fail, raising the question of what makes a dialogue successful. In this work, we study explanation dialogues in terms of the interactions between the explainer and explainee and how they correlate with the quality of explanations in terms of a successful understanding on the explainee{’}s side. In particular, we first construct a corpus of 399 dialogues from the Reddit forum {Explain Like I am Five} and annotate it for interaction flows and explanation quality. We then analyze the interaction flows, comparing them to those appearing in expert dialogues. Finally, we encode the interaction flows using two language models that can handle long inputs, and we provide empirical evidence for the effectiveness boost gained through the encoding in predicting the success of explanation dialogues.}},
  author       = {{Alshomary, Milad and Lange, Felix and Booshehri, Meisam and Sengupta, Meghdut and Cimiano, Philipp and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)}},
  editor       = {{Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen}},
  pages        = {{11523--11536}},
  publisher    = {{ELRA and ICCL}},
  title        = {{{Modeling the Quality of Dialogical Explanations}}},
  year         = {{2024}},
}

@inproceedings{58722,
  abstract     = {{Dialects introduce syntactic and lexical variations in language that occur in regional or social groups. Most NLP methods are not sensitive to such variations. This may lead to unfair behavior of the methods, conveying negative bias towards dialect speakers. While previous work has studied dialect-related fairness for aspects like hate speech, other aspects of biased language, such as lewdness, remain fully unexplored. To fill this gap, we investigate performance disparities between dialects in the detection of five aspects of biased language and how to mitigate them. To alleviate bias, we present a multitask learning approach that models dialect language as an auxiliary task to incorporate syntactic and lexical variations. In our experiments with African-American English dialect, we provide empirical evidence that complementing common learning approaches with dialect modeling improves their fairness. Furthermore, the results suggest that multitask learning achieves state-of-the-art performance and helps to detect properties of biased language more reliably.}},
  author       = {{Spliethöver, Maximilian and Menon, Sai Nikhil and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: ACL 2024}},
  editor       = {{Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek}},
  pages        = {{9294--9313}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Disentangling Dialect from Social Bias via Multitask Learning to Improve Fairness}}},
  doi          = {{10.18653/v1/2024.findings-acl.553}},
  year         = {{2024}},
}

@inproceedings{55406,
  abstract     = {{Metaphorical language, such as {“}spending time together{”}, projects meaning from a source domain (here, $money$) to a target domain ($time$). Thereby, it highlights certain aspects of the target domain, such as the $effort$ behind the time investment. Highlighting aspects with metaphors (while hiding others) bridges the two domains and is the core of metaphorical meaning construction. For metaphor interpretation, linguistic theories stress that identifying the highlighted aspects is important for a better understanding of metaphors. However, metaphor research in NLP has not yet dealt with the phenomenon of highlighting. In this paper, we introduce the task of identifying the main aspect highlighted in a metaphorical sentence. Given the inherent interaction of source domains and highlighted aspects, we propose two multitask approaches - a joint learning approach and a continual learning approach - based on a finetuned contrastive learning model to jointly predict highlighted aspects and source domains. We further investigate whether (predicted) information about a source domain leads to better performance in predicting the highlighted aspects, and vice versa. Our experiments on an existing corpus suggest that, with the corresponding information, the performance to predict the other improves in terms of model accuracy in predicting highlighted aspects and source domains notably compared to the single-task baselines.}},
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Scharlau, Ingrid and Wachsmuth, Henning}},
  booktitle    = {{Findings of the Association for Computational Linguistics: EMNLP 2023}},
  editor       = {{Bouamor, Houda and Pino, Juan and Bali, Kalika}},
  pages        = {{4636--4659}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Modeling Highlighting of Metaphors in Multitask Contrastive Learning Paradigms}}},
  doi          = {{10.18653/v1/2023.findings-emnlp.308}},
  year         = {{2023}},
}

@inproceedings{58723,
  abstract     = {{In real-world debates, the most common way to counter an argument is to reason against its main point, that is, its conclusion. Existing work on the automatic generation of natural language counter-arguments does not address the relation to the conclusion, possibly because many arguments leave their conclusion implicit. In this paper, we hypothesize that the key to effective counter-argument generation is to explicitly model the argument's conclusion and to ensure that the stance of the generated counter is opposite to that conclusion. In particular, we propose a multitask approach that jointly learns to generate both the conclusion and the counter of an input argument. The approach employs a stance-based ranking component that selects the counter from a diverse set of generated candidates whose stance best opposes the generated conclusion. In both automatic and manual evaluation, we provide evidence that our approach generates more relevant and stance-adhering counters than strong baselines.}},
  author       = {{Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics}},
  editor       = {{Vlachos, Andreas and Augenstein, Isabelle}},
  pages        = {{957--967}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Conclusion-based Counter-Argument Generation}}},
  doi          = {{10.18653/v1/2023.eacl-main.67}},
  year         = {{2023}},
}

@inproceedings{33004,
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  pages        = {{344--354}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{``Mama Always Had a Way of Explaining Things So I Could Understand'': A Dialogue Corpus for Learning How to Explain}}},
  year         = {{2022}},
  internal-note = {{apparent duplicate of entry 55337 (same venue, pages, and authors; title variant differs) -- consider merging the two entries}},
}

@article{34049,
  author       = {{Lauscher, Anne and Wachsmuth, Henning and Gurevych, Iryna and Glavaš, Goran}},
  journal      = {{Transactions of the Association for Computational Linguistics}},
  title        = {{{On the Role of Knowledge in Computational Argumentation}}},
  year         = {{2022}},
  internal-note = {{volume, pages, and doi are missing for this TACL article -- complete from the publisher record}},
}

@inproceedings{22157,
  author       = {{Kiesel, Johannes and Alshomary, Milad and Handke, Nicolas and Cai, Xiaoni and Wachsmuth, Henning and Stein, Benno}},
  booktitle    = {{Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics}},
  pages        = {{4459--4471}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Identifying the Human Values behind Arguments}}},
  year         = {{2022}},
}

@inproceedings{34047,
  abstract     = {{News articles both shape and reflect public opinion across the political
spectrum. Analyzing them for social bias can thus provide valuable insights,
such as prevailing stereotypes in society and the media, which are often
adopted by NLP models trained on respective data. Recent work has relied on
word embedding bias measures, such as WEAT. However, several representation
issues of embeddings can harm the measures' accuracy, including low-resource
settings and token frequency differences. In this work, we study what kind of
embedding algorithm serves best to accurately measure types of social bias
known to exist in US online news articles. To cover the whole spectrum of
political bias in the US, we collect 500k articles and review psychology
literature with respect to expected social bias. We then quantify social bias
using WEAT along with embedding algorithms that account for the aforementioned
issues. We compare how models trained with the algorithms on news articles
represent the expected social bias. Our results suggest that the standard way
to quantify bias does not align well with knowledge from psychology. While the
proposed algorithms reduce the gap, they still do not fully match the
literature.}},
  author       = {{Spliethöver, Maximilian and Keiff, Maximilian and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of The 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP 2022)}},
  location     = {{Abu Dhabi}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{No Word Embedding Model Is Perfect: Evaluating the Representation Accuracy for Social Bias in the Media}}},
  year         = {{2022}},
  internal-note = {{page range is missing -- complete from the anthology record}},
}

@inbook{34077,
  author       = {{Bondarenko, Alexander and Fröbe, Maik and Kiesel, Johannes and Syed, Shahbaz and Gurcke, Timon and Beloucif, Meriem and Panchenko, Alexander and Biemann, Chris and Stein, Benno and Wachsmuth, Henning and Potthast, Martin and Hagen, Matthias}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{9783030997380}},
  issn         = {{0302-9743}},
  publisher    = {{Springer International Publishing}},
  title        = {{{Overview of Touché 2022: Argument Retrieval}}},
  doi          = {{10.1007/978-3-030-99739-7_43}},
  year         = {{2022}},
  internal-note = {{booktitle holds the series name rather than the proceedings title (likely the CLEF 2022 proceedings -- confirm); series volume and page range are also missing}},
}

@inproceedings{33274,
  author       = {{Chen, Wei-Fan and Chen, Mei-Hua and Mudgal, Garima and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 9th Workshop on Argument Mining (ArgMining 2022)}},
  pages        = {{51--61}},
  title        = {{{Analyzing Culture-Specific Argument Structures in Learner Essays}}},
  year         = {{2022}},
}

@inproceedings{31068,
  author       = {{Chen, Mei-Hua and Mudgal, Garima and Chen, Wei-Fan and Wachsmuth, Henning}},
  booktitle    = {{EUROCALL}},
  title        = {{{Investigating the argumentation structures of EFL learners from diverse language backgrounds}}},
  year         = {{2022}},
  internal-note = {{skeletal entry: full proceedings title, pages, and publisher are missing -- complete from the EUROCALL 2022 record}},
}

@inproceedings{55337,
  abstract     = {{As AI is more and more pervasive in everyday life, humans have an increasing demand to understand its behavior and decisions. Most research on explainable AI builds on the premise that there is one ideal explanation to be found. In fact, however, everyday explanations are co-constructed in a dialogue between the person explaining (the explainer) and the specific person being explained to (the explainee). In this paper, we introduce a first corpus of dialogical explanations to enable NLP research on how humans explain as well as on how AI can learn to imitate this process. The corpus consists of 65 transcribed English dialogues from the Wired video series 5 Levels, explaining 13 topics to five explainees of different proficiency. All 1550 dialogue turns have been manually labeled by five independent professionals for the topic discussed as well as for the dialogue act and the explanation move performed. We analyze linguistic patterns of explainers and explainees, and we explore differences across proficiency levels. BERT-based baseline results indicate that sequence information helps predicting topics, acts, and moves effectively.}},
  author       = {{Wachsmuth, Henning and Alshomary, Milad}},
  booktitle    = {{Proceedings of the 29th International Conference on Computational Linguistics}},
  editor       = {{Calzolari, Nicoletta and Huang, Chu-Ren and Kim, Hansaem and Pustejovsky, James and Wanner, Leo and Choi, Key-Sun and Ryu, Pum-Mo and Chen, Hsin-Hsi and Donatelli, Lucia and Ji, Heng and Kurohashi, Sadao and Paggio, Patrizia and Xue, Nianwen and Kim, Seokhwan and Hahm, Younggyun and He, Zhong and Lee, Tony Kyungil and Santus, Enrico and Bond, Francis and Na, Seung-Hoon}},
  pages        = {{344--354}},
  publisher    = {{International Committee on Computational Linguistics}},
  title        = {{{“Mama Always Had a Way of Explaining Things So I Could Understand”: A Dialogue Corpus for Learning to Construct Explanations}}},
  year         = {{2022}},
  internal-note = {{apparent duplicate of entry 33004 (same venue, pages, and authors; title variant differs) -- consider merging the two entries}},
}

@inproceedings{34067,
  author       = {{Sengupta, Meghdut and Alshomary, Milad and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 2022 Workshop on Figurative Language Processing}},
  title        = {{{Back to the Roots: Predicting the Source Domain of Metaphors using Contrastive Learning}}},
  year         = {{2022}},
  internal-note = {{pages, publisher, and doi are missing -- complete from the workshop proceedings record}},
}

@mastersthesis{29000,
  abstract     = {{This thesis aims to provide a bidirectional chatbot solution for the requirement engineering process. The Sonderforschungsbereich (SFB) 901 intends to provide the composition of software service On-the-Fly (OTF). The sub-project (B1) of the SFB 901 project deals with the parameters of service configuration. OTF Computing aims to eradicate the dependency on the requirement engineers for the software development process. However, there is no existing bidirectional chatbot solution that analyses user software requirements and provides viable suggestions to the user regarding their service. Previously, CORDULA chatbot was developed to analyze the software requirements but cannot keep the conversation’s context. The Rasa framework is integrated with the knowledge base to solve the issue, the knowledge base provides domain-specific knowledge to the chatbot. The software description is passed through the natural language understanding process to give consciousness to the chatbot. This process involves various machine learning models, including app family classification, to correctly identify the domain for user OTF service. The statistical models like naïve Bayes, kNN and SVM are compared with transformer models for this classification task. Furthermore, the entities (functional requirements) are also separated from the user description.
The chatbot provides the suggestion of requirements from the preliminary service template with the support of the knowledge base. Furthermore, the generated response is compared with the state-of-the-art DialoGPT transformer model and ChatterBot conversational library. These models are trained over the software development related conversational dataset. All the responses are ranked using the DialoRPT model, and the BLEU score to evaluates the models’ responses. Moreover, the chatbot models are tested with human participants, they used and scored the chatbot responses based on effectiveness, efficiency and satisfaction. The overall response accuracy is also measured by averaging the user approval over the generated responses.}},
  author       = {{Ahmed, Mobeen}},
  school       = {{Paderborn University}},
  title        = {{{Knowledge Base Enhanced \& User-centric Dialogue Design for OTF Computing}}},
  year         = {{2022}},
  internal-note = {{entry type changed from @misc based on the abstract (``This thesis...''); school inferred from the SFB 901 context -- confirm institution}},
}

@misc{45790,
  author       = {{Palushi, Juela}},
  title        = {{{Domain-aware Text Professionalization using Sequence-to-Sequence Neural Networks}}},
  year         = {{2022}},
  internal-note = {{appears to be a student thesis (cf. entry 29000); entry type, institution, and thesis kind to be confirmed}},
}

@misc{45789,
  author       = {{Budanurmath, Vinaykumar}},
  title        = {{{Propaganda Technique Detection Using Connotation Frames}}},
  year         = {{2022}},
  internal-note = {{appears to be a student thesis (cf. entry 29000); entry type, institution, and thesis kind to be confirmed}},
}

@inproceedings{32247,
  author       = {{Alshomary, Milad and Rieskamp, Jonas and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 9th International Conference on Computational Models of Argument}},
  pages        = {{21--31}},
  title        = {{{Generating Contrastive Snippets for Argument Search}}},
  doi          = {{10.3233/FAIA220138}},
  year         = {{2022}},
}

@inproceedings{30840,
  author       = {{Alshomary, Milad and El Baff, Roxanne and Gurcke, Timon and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics}},
  pages        = {{8782--8797}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{The Moral Debater: A Study on the Computational Generation of Morally Framed Arguments}}},
  year         = {{2022}},
}

@inproceedings{20115,
  author       = {{Skitalinskaya, Gabriella and Klaff, Jonas and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics}},
  pages        = {{1718--1729}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Learning From Revisions: Quality Assessment of Claims in Argumentation at Scale}}},
  year         = {{2021}},
}

