@inproceedings{61421,
  author       = {{Battefeld, Dominik and Kopp, Stefan}},
  booktitle    = {{Proceedings of KogWis 2025: Conference of the German Cognitive Science Society}},
  location     = {{Bochum, Germany}},
  title        = {{{Implementing a computational cognitive process model of medical diagnostic reasoning}}},
  year         = {{2025}},
}

@inproceedings{56480,
  abstract     = {{As the field of healthcare increasingly adopts artificial intelligence, it becomes important to understand which types of explanations increase transparency and empower users to develop confidence and trust in the predictions made by machine learning (ML) systems. In shared decision-making scenarios where doctors cooperate with ML systems to reach an appropriate decision, establishing mutual trust is crucial. In this paper, we explore different approaches to generating explanations in eXplainable AI (XAI) and make their underlying arguments explicit so that they can be evaluated by medical experts. In particular, we present the findings of a user study conducted with physicians to investigate their perceptions of various types of AI-generated explanations in the context of diagnostic decision support. The study aims to identify the most effective and useful explanations that enhance the diagnostic process. In the study, medical doctors filled out a survey to assess different types of explanations. A post-survey interview was then carried out to gain qualitative insights into the requirements for explanations incorporated in diagnostic decision support. Overall, the insights gained from this study contribute to understanding the types of explanations that are most effective.}},
  author       = {{Liedeker, Felix and Sanchez-Graillet, Olivia and Seidler, Moana and Brandt, Christian and Wellmer, Jörg and Cimiano, Philipp}},
  location     = {{Santiago de Compostela, Spain}},
  title        = {{{A User Study Evaluating Argumentative Explanations in Diagnostic Decision Support}}},
  year         = {{2024}},
}

@inproceedings{56479,
  abstract     = {{While the importance of explainable artificial intelligence in high-stakes decision-making is widely recognized in existing literature, empirical studies assessing users' perceived value of explanations are scarce. In this paper, we aim to address this shortcoming by conducting an empirical study focused on measuring the perceived value of the following types of explanations: plain explanations based on feature attribution, counterfactual explanations and complex counterfactual explanations. We measure an explanation's value using five dimensions: perceived accuracy, understandability, plausibility, sufficiency of detail, and user satisfaction. Our findings indicate a sweet spot of explanation complexity, with both dimensional and structural complexity positively impacting the perceived value up to a certain threshold.}},
  author       = {{Liedeker, Felix and Düsing, Christoph and Nieveler, Marcel and Cimiano, Philipp}},
  keywords     = {{XAI, Explanation Complexity, User Perception}},
  location     = {{Valletta, Malta}},
  title        = {{{An Empirical Investigation of Users' Assessment of XAI Explanations: Identifying the Sweet-Spot of Explanation Complexity}}},
  year         = {{2024}},
}

@inproceedings{56844,
  author       = {{Battefeld, Dominik and Liedeker, Felix and Cimiano, Philipp and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 1st Workshop on Multimodal, Affective and Interactive eXplainable AI (MAI-XAI)}},
  location     = {{Santiago de Compostela, Spain}},
  title        = {{{ASCODI: An XAI-based interactive reasoning support system for justifiable medical diagnosing}}},
  year         = {{2024}},
}

@inproceedings{55429,
  abstract     = {{A detailed understanding of the cognitive process underlying diagnostic reasoning in medical experts is currently lacking. While high-level theories like hypothetico-deductive reasoning were proposed long ago, the inner workings of the step-by-step dynamics within the mind remain unknown. We present a fully automated approach to elicit, monitor, and record diagnostic reasoning processes at a fine-grained level. A web-based user interface enables physicians to carry out a full diagnosis process on a simulated patient, given as a pre-defined clinical vignette. By collecting the physician’s information queries and hypothesis revisions, highly detailed diagnostic reasoning trajectories are captured leading to a diagnosis and its justification. Four expert epileptologists with a mean experience of 19 years were recruited to evaluate the system and share their impressions in semi-structured interviews. We find that the recorded trajectories validate proposed theories on broader diagnostic reasoning, while also providing valuable additional details extending previous findings.}},
  author       = {{Battefeld, Dominik and Mues, Sigrid and Wehner, Tim and House, Patrick and Kellinghaus, Christoph and Wellmer, Jörg and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 46th Annual Conference of the Cognitive Science Society}},
  keywords     = {{Differential Diagnosis, Diagnostic Reasoning, Reasoning Process Analysis, Seizure, Epilepsy}},
  location     = {{Rotterdam, The Netherlands}},
  title        = {{{Revealing the Dynamics of Medical Diagnostic Reasoning as Step-by-Step Cognitive Process Trajectories}}},
  year         = {{2024}},
}

@inproceedings{56477,
  abstract     = {{We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS.}},
  author       = {{Liedeker, Felix and Cimiano, Philipp}},
  keywords     = {{Explainable AI, Clinical decision support, Bayesian network, Counterfactual explanations}},
  location     = {{Lisbon, Portugal}},
  title        = {{{A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}}},
  year         = {{2023}},
}

@inproceedings{56478,
  author       = {{Liedeker, Felix and Cimiano, Philipp}},
  location     = {{Breckenridge, CO, USA}},
  title        = {{{Dynamic Feature Selection in AI-based Diagnostic Decision Support for Epilepsy}}},
  year         = {{2023}},
}

@inproceedings{51343,
  abstract     = {{This paper presents preliminary work on the formalization of three prominent cognitive biases in the diagnostic reasoning process over epileptic seizures, psychogenic seizures and syncopes. Diagnostic reasoning is understood as iterative exploration of medical evidence. This exploration is represented as a partially observable Markov decision process where the state (i.e., the correct diagnosis) is uncertain. Observation likelihoods and belief updates are computed using a Bayesian network which defines the interrelation between medical risk factors, diagnoses and potential findings. The decision problem is solved via partially observable upper confidence bounds for trees in Monte-Carlo planning. We compute a biased diagnostic exploration policy by altering the generated state transition, observation and reward during look-ahead simulations. The resulting diagnostic policies reproduce reasoning errors which have only been described informally in the medical literature. We plan to use this formal representation in the future to inversely detect and classify biased reasoning in actual diagnostic trajectories obtained from physicians.}},
  author       = {{Battefeld, Dominik and Kopp, Stefan}},
  booktitle    = {{Proceedings of the 8th Workshop on Formal and Cognitive Reasoning}},
  keywords     = {{Diagnostic reasoning, Cognitive bias, Cognitive model, POMDP, Bayesian network, Epilepsy, CDSS}},
  location     = {{Trier, Germany}},
  title        = {{{Formalizing cognitive biases in medical diagnostic reasoning}}},
  year         = {{2022}},
}
