@inbook{65084,
  author       = {{Buhl, Heike M. and Vollmer, Anna-Lisa and Alami, Rachid and Booshehri, Meisam and Främling, Kary}},
  booktitle    = {{Social Explainable AI}},
  editor       = {{Rohlfing, Katharina J. and Främling, Kary and Lim, Brian and Alpsancar, Suzana and Thommes, Kirsten}},
  pages        = {{269--295}},
  publisher    = {{Springer}},
  title        = {{{Models of the situation, the explanandum, and the interaction partner}}},
  doi          = {{10.1007/978-981-96-5290-7_14}},
  year         = {{2026}},
}

@unpublished{61065,
  abstract     = {{Abduction is the task of computing a sufficient extension of a knowledge base (KB) that entails a conclusion not entailed by the original KB. It serves to compute explanations, or hypotheses, for such missing entailments. While this task has been intensively investigated for perfect data and under classical semantics, less is known about abduction when erroneous data results in inconsistent KBs. In this paper we define a suitable notion of abduction under repair semantics and propose a set of minimality criteria that guides abduction towards `useful' hypotheses. We provide initial complexity results on deciding existence of and verifying abductive solutions with these criteria, under different repair semantics and for the description logics DL-Lite and EL_bot.}},
  author       = {{Haak, Anselm and Koopmann, Patrick and Mahmood, Yasir and Turhan, Anni-Yasmin}},
  booktitle    = {{arXiv:2507.21955}},
  title        = {{{Why not? Developing ABox Abduction beyond Repairs}}},
  year         = {{2025}},
}

@inproceedings{59910,
  abstract     = {{The connection between inconsistent databases and Dung’s abstract argumentation framework has recently drawn growing interest. Specifically, an inconsistent database, involving certain types of integrity constraints such as functional and inclusion dependencies, can be viewed as an argumentation framework in Dung’s setting. Nevertheless, no prior work has explored the exact expressive power of Dung’s theory of argumentation when compared to inconsistent databases and integrity constraints. In this paper, we close this gap by arguing that an argumentation framework can also be viewed as an inconsistent database. We first establish a connection between subset-repairs for databases and extensions for AFs considering conflict-free, naive, admissible, and preferred semantics. Further, we define a new family of attribute-based repairs based on the principle of maximal content preservation. The effectiveness of these repairs is then highlighted by connecting them to stable, semi-stable, and stage semantics. Our main contributions include translating an argumentation framework into a database together with integrity constraints. Moreover, this translation can be achieved in polynomial time, which is essential in transferring complexity results between the two formalisms.}},
  author       = {{Mahmood, Yasir and Hecher, Markus and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of the AAAI Conference on Artificial Intelligence}},
  issn         = {{2374-3468}},
  number       = {{14}},
  pages        = {{15058--15066}},
  publisher    = {{Association for the Advancement of Artificial Intelligence (AAAI)}},
  title        = {{{Dung’s Argumentation Framework: Unveiling the Expressive Power with Inconsistent Databases}}},
  doi          = {{10.1609/aaai.v39i14.33651}},
  volume       = {{39}},
  year         = {{2025}},
}

@unpublished{60718,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{arXiv:2504.18483}},
  pages        = {{20}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@article{59912,
  abstract     = {{We study the expressivity and the complexity of various logics in probabilistic team semantics with the Boolean negation. In particular, we study the extension of probabilistic independence logic with the Boolean negation, and a recently introduced logic first-order theory of random variables with probabilistic independence. We give several results that compare the expressivity of these logics with the most studied logics in probabilistic team semantics setting, as well as relating their expressivity to a numerical variant of second-order logic. In addition, we introduce novel entropy atoms and show that the extension of first-order logic by entropy atoms subsumes probabilistic independence logic. Finally, we obtain some results on the complexity of model checking, validity and satisfiability of our logics.}},
  author       = {{Hannula, Miika and Hirvonen, Minna and Kontinen, Juha and Mahmood, Yasir and Meier, Arne and Virtema, Jonni}},
  issn         = {{0955-792X}},
  journal      = {{Journal of Logic and Computation}},
  number       = {{3}},
  publisher    = {{Oxford University Press (OUP)}},
  title        = {{{Logics with probabilistic team semantics and the Boolean negation}}},
  doi          = {{10.1093/logcom/exaf021}},
  volume       = {{35}},
  year         = {{2025}},
}

@unpublished{61066,
  abstract     = {{Argumentation is a central subarea of Artificial Intelligence (AI) for
modeling and reasoning about arguments. The semantics of abstract argumentation
frameworks (AFs) is given by sets of arguments (extensions) and conditions on
the relationship between them, such as stable or admissible. Today's solvers
implement tasks such as finding extensions, deciding credulous or skeptical
acceptance, counting, or enumerating extensions. While these tasks are well
charted, the area between decision, counting/enumeration and fine-grained
reasoning requires expensive reasoning so far. We introduce a novel concept
(facets) for reasoning between decision and enumeration. Facets are arguments
that belong to some extensions (credulous) but not to all extensions
(skeptical). They are most natural when a user aims to navigate, filter, or
comprehend the significance of specific arguments, according to their needs. We
study the complexity and show that tasks involving facets are much easier than
counting extensions. Finally, we provide an implementation, and conduct
experiments to demonstrate feasibility.}},
  author       = {{Fichte, Johannes and Fröhlich, Nicolas and Hecher, Markus and Lagerkvist, Victor and Mahmood, Yasir and Meier, Arne and Persson, Jonathan}},
  booktitle    = {{arXiv:2505.10982}},
  title        = {{{Facets in Argumentation: A Formal Approach to Argument Significance}}},
  year         = {{2025}},
}

@inproceedings{61234,
  abstract     = {{The ability to generate explanations that are understood by explainees is the
quintessence of explainable artificial intelligence. Since understanding
depends on the explainee's background and needs, recent research focused on
co-constructive explanation dialogues, where an explainer continuously monitors
the explainee's understanding and adapts their explanations dynamically. We
investigate the ability of large language models (LLMs) to engage as explainers
in co-constructive explanation dialogues. In particular, we present a user
study in which explainees interact with an LLM in two settings, one of which
involves the LLM being instructed to explain a topic co-constructively. We
evaluate the explainees' understanding before and after the dialogue, as well
as their perception of the LLMs' co-constructive behavior. Our results suggest
that LLMs show some co-constructive behaviors, such as asking verification
questions, that foster the explainees' engagement and can improve understanding
of a topic. However, their ability to effectively monitor the current
understanding and scaffold the explanations accordingly remains limited.}},
  author       = {{Fichtel, Leandra and Spliethöver, Maximilian and Hüllermeier, Eyke and Jimenez, Patricia and Klowait, Nils and Kopp, Stefan and Ngonga Ngomo, Axel-Cyrille and Robrecht, Amelie and Scharlau, Ingrid and Terfloth, Lutz and Vollmer, Anna-Lisa and Wachsmuth, Henning}},
  booktitle    = {{Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Investigating Co-Constructive Behavior of Large Language Models in Explanation Dialogues}}},
  year         = {{2025}},
}

@inbook{57238,
  abstract     = {{Abstract argumentation is a popular toolkit for modeling, evaluating, and comparing arguments. Relationships between arguments are specified in argumentation frameworks (AFs), and conditions are placed on sets (extensions) of arguments that allow AFs to be evaluated. For more expressiveness, AFs are augmented with acceptance conditions on directly interacting arguments or a constraint on the admissible sets of arguments, resulting in dialectic frameworks or constrained argumentation frameworks. In this paper, we consider flexible conditions for rejecting an argument from an extension, which we call rejection conditions (RCs). On the technical level, we associate each argument with a specific logic program. We analyze the resulting complexity, including the structural parameter treewidth. Rejection AFs are highly expressive, giving rise to natural problems on higher levels of the polynomial hierarchy.}},
  author       = {{Fichte, Johannes K. and Hecher, Markus and Mahmood, Yasir and Meier, Arne}},
  booktitle    = {{Frontiers in Artificial Intelligence and Applications}},
  isbn         = {{9781643685489}},
  issn         = {{0922-6389}},
  location     = {{Santiago de Compostela, Spain}},
  publisher    = {{IOS Press}},
  title        = {{{Rejection in Abstract Argumentation: Harder Than Acceptance?}}},
  doi          = {{10.3233/faia240867}},
  year         = {{2024}},
}

@inproceedings{55655,
  abstract     = {{Argumentation is a well-established formalism for nonmonotonic reasoning, with popular frameworks being Dung’s abstract argumentation (AFs) or logic-based argumentation (Besnard-Hunter’s framework). Structurally, a set of formulas forms support for a claim if it is consistent, subset-minimal, and implies the claim. Then, an argument comprises support and a claim. We observe that the computational task (ARG) of asking for support of a claim in a knowledge base is “brave”, since many claims with a single support are accepted. As a result, ARG falls short when it comes to the question of confidence in a claim, or claim strength. In this paper, we propose a concept for measuring the (acceptance) strength of claims, based on counting supports for a claim. Further, we settle classical and structural complexity of counting arguments favoring a given claim in propositional knowledge bases (KBs). We introduce quantitative reasoning to measure the strength of claims in a KB and to determine the relevance strength of a formula for a claim.}},
  author       = {{Hecher, Markus and Mahmood, Yasir and Meier, Arne and Schmidt, Johannes}},
  booktitle    = {{Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence}},
  publisher    = {{International Joint Conferences on Artificial Intelligence Organization}},
  title        = {{{Quantitative Claim-Centric Reasoning in Logic-Based Argumentation}}},
  doi          = {{10.24963/ijcai.2024/377}},
  year         = {{2024}},
}

@unpublished{57814,
  abstract     = {{We study consistent query answering via different graph representations.
First, we introduce solution-conflict hypergraphs in which nodes represent
facts and edges represent either conflicts or query solutions. Considering a
monotonic query and a set of antimonotonic constraints, we present an explicit
algorithm for counting the number of repairs satisfying the query based on a
tree decomposition of the solution-conflict hypergraph. The algorithm not only
provides fixed-parameter tractability results for data complexity over
expressive query and constraint classes, but also introduces a novel and
potentially implementable approach to repair counting. Second, we consider the
Gaifman graphs arising from MSO descriptions of consistent query answering.
Using a generalization of Courcelle's theorem, we then present fixed-parameter
tractability results for combined complexity over expressive query and
constraint classes.}},
  author       = {{Hankala, Teemu and Hannula, Miika and Mahmood, Yasir and Meier, Arne}},
  booktitle    = {{arXiv:2412.08324}},
  title        = {{{Parameterised Complexity of Consistent Query Answering via Graph Representations}}},
  year         = {{2024}},
}

@article{51368,
  abstract     = {{Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Transparency, Explanation, Communication, Sociological systems theory}},
  number       = {{3}},
  pages        = {{17--27}},
  title        = {{{Does Explainability Require Transparency?}}},
  doi          = {{10.6092/ISSN.1971-8853/15804}},
  volume       = {{16}},
  year         = {{2023}},
}

@article{51369,
  abstract     = {{This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability}},
  number       = {{3}},
  pages        = {{1--4}},
  title        = {{{Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}}},
  doi          = {{10.6092/ISSN.1971-8853/16265}},
  volume       = {{16}},
  year         = {{2023}},
}

@inproceedings{34674,
  abstract     = {{Smart home systems contain plenty of features that enhance wellbeing in everyday life through artificial intelligence (AI). However, many users feel insecure because they do not understand the AI’s functionality and do not feel they are in control of it. Combining technical, psychological and philosophical views on AI, we rethink smart homes as interactive systems where users can partake in an intelligent agent’s learning. Parallel to the goals of explainable AI (XAI), we explored the possibility of user involvement in supervised learning of the smart home to have a first approach to improve acceptance, support subjective understanding and increase perceived control. In this work, we conducted two studies: In an online pre-study, we asked participants about their attitude towards teaching AI via a questionnaire. In the main study, we performed a Wizard of Oz laboratory experiment with human participants, where participants spent time in a prototypical smart home and taught activity recognition to the intelligent agent through supervised learning based on the user’s behaviour. We found that involvement in the AI’s learning phase enhanced the users’ feeling of control, perceived understanding and perceived usefulness of AI in general. The participants reported positive attitudes towards training a smart home AI and found the process understandable and controllable. We suggest that involving the user in the learning phase could lead to better personalisation and increased understanding and control by users of intelligent agents for smart home automation.}},
  author       = {{Sieger, Leonie Nora and Hermann, Julia and Schomäcker, Astrid and Heindorf, Stefan and Meske, Christian and Hey, Celine-Chiara and Doğangün, Ayşegül}},
  booktitle    = {{International Conference on Human-Agent Interaction}},
  keywords     = {{human-agent interaction, smart homes, supervised learning, participation}},
  location     = {{Christchurch, New Zealand}},
  publisher    = {{ACM}},
  title        = {{{User Involvement in Training Smart Home Agents}}},
  doi          = {{10.1145/3527188.3561914}},
  year         = {{2022}},
}

