@article{63611,
  abstract     = {{When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust means that humans trust AI except in instances in which they either explicitly notice AI errors or suspect that errors may be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.}},
  author       = {{Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid}},
  issn         = {{1664-1078}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Assessing healthy distrust in human-AI interaction: interpreting changes in visual attention}}},
  doi          = {{10.3389/fpsyg.2025.1694367}},
  volume       = {{16}},
  year         = {{2025}},
}

@article{59756,
  abstract     = {{A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established, and empirical investigations of their relation remain mixed or inconclusive.
In this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have conducted a focused survey of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding, we identify gaps in existing empirical work. By clarifying the concepts and summarizing the empirical studies, we aim to provide researchers who examine user trust in AI with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.}},
  author       = {{Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara}},
  issn         = {{1389-0417}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{XAI, Appropriate trust, Distrust, Reliance, Human-centric evaluation, Trustworthy AI}},
  publisher    = {{Elsevier BV}},
  title        = {{{Trust, distrust, and appropriate reliance in (X)AI: A conceptual clarification of user trust and survey of its empirical evaluation}}},
  doi          = {{10.1016/j.cogsys.2025.101357}},
  year         = {{2025}},
}

@article{59755,
  abstract     = {{Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classifications as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, and thus to distrust, an AI decision is an equally interesting target of research.
We created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases during one phase of the experiment. We studied the participants’ task performance, trust, and distrust, and tested whether an instruction to remain skeptical and review each piece of advice led to better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-reports of trust and distrust show an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the use of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research.}},
  author       = {{Peters, Tobias Martin and Scharlau, Ingrid}},
  journal      = {{Frontiers in Psychology}},
  keywords     = {{trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification}},
  title        = {{{Interacting with fallible AI: Is distrust helpful when receiving AI misclassifications?}}},
  doi          = {{10.3389/fpsyg.2025.1574809}},
  volume       = {{16}},
  year         = {{2025}},
}

@article{58650,
  abstract     = {{Technical systems are characterized by increasing interdisciplinarity, complexity and networking. A product and its corresponding production systems require interdisciplinary multi-objective optimization. Sustainability and recyclability demands increase said complexity. The efficiency of previously established engineering methods is reaching its limits, which can only be overcome by systematic integration of extreme data. The aim of "hybrid decision support" is as follows: Data science and artificial intelligence should be used to supplement human capabilities in conjunction with existing heuristics, methods, modeling and simulation to increase the efficiency of product creation.}},
  author       = {{Gräßler, Iris and Pottebaum, Jens and Nyhuis, Peter and Stark, Rainer and Thoben, Klaus-Dieter and Wiederkehr, Petra}},
  issn         = {{2942-6170}},
  journal      = {{Industry 4.0 Science}},
  keywords     = {{AI, artificial intelligence, Data Science, decision support, extreme data, Künstliche Intelligenz, product creation, product development}},
  number       = {{1}},
  publisher    = {{GITO mbH Verlag}},
  title        = {{{Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence}}},
  doi          = {{10.30844/i4sd.25.1.18}},
  volume       = {{2025}},
  year         = {{2025}},
}

@article{61410,
  abstract     = {{Purpose: The purpose of this study is to identify, analyze, and explain the implications that could arise for service settings if AI systems develop, or are perceived to develop, consciousness – the ability to acknowledge their own existence and the capacity for positive or negative experiences.

Design/methodology/approach: This study proposes and explores four hypothetical scenarios in which conscious AI in service could manifest. We contextualize our resulting typology in the health service context and integrate extant literature on technology-enabled service, AI consciousness, and AI ethics into the narrative.

Findings: This study provides a unique theoretical contribution to service research in the form of a Type IV theory. It enables future service researchers to apprehend, explain, and predict how functionally conscious AI in service might unfold.

Originality: An increasingly prolific public discourse acknowledges that conscious AI systems may emerge. Against this backdrop, this study aims to systematically explore a question that is perhaps the most critical and timely, but also inherently speculative, in relation to AI in service research by introducing much-needed theory and terminology.

Practical implications: The ethical use of conscious AI in service could emerge as a distinct competitive advantage in the future. Achieving this outcome involves speculative yet actionable recommendations that include training, guiding, and controlling how humans engage with such systems, developing appropriate wellbeing protocols for functionally conscious AI systems, and establishing AI rights and governance frameworks.}},
  author       = {{Breidbach, Christoph and Ferm, Lars-Erik Casper and Maglio, Paul and Beverungen, Daniel and Wirtz, Jochen and Twigg, Alex}},
  journal      = {{Journal of Service Management}},
  keywords     = {{AI, AI consciousness, AI ethics, service systems}},
  publisher    = {{Emerald}},
  title        = {{{Conscious Artificial Intelligence in Service}}},
  year         = {{2025}},
}

@article{61156,
  abstract     = {{Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.}},
  author       = {{Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta}},
  journal      = {{Cognitive Systems Research}},
  keywords     = {{understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency}},
  title        = {{{Forms of Understanding for XAI-Explanations}}},
  doi          = {{10.1016/j.cogsys.2025.101419}},
  volume       = {{94}},
  year         = {{2025}},
}

@inproceedings{63019,
  author       = {{Donner, Johannes Aurelius Tamino and Schlüter, Alexander}},
  booktitle    = {{SDEWES Conference 2025}},
  keywords     = {{5GDHC, district heating, DHC, waste heat, AI-Driven}},
  location     = {{Dubrovnik}},
  title        = {{{Development of an AI-driven decentralized control for fifth generation district heating and cooling networks}}},
  year         = {{2025}},
}

@inproceedings{56166,
  abstract     = {{Developing Intelligent Technical Systems (ITS) involves a complex process encompassing planning, analysis, design, production, and maintenance. Model-Based Systems Engineering (MBSE) is a key methodology for systematic systems engineering. Designing models for ITS requires harmonious interaction of various elements, posing a challenge in MBSE. Leveraging Generative Artificial Intelligence, we generated a dataset for modeling, using prompt engineering on large language models. The generated artifacts can aid engineers in MBSE design or serve as synthetic training data for AI assistants.}},
  author       = {{Kulkarni, Pranav Jayant and Tissen, Denis and Bernijazov, Ruslan and Dumitrescu, Roman}},
  booktitle    = {{DS 130: Proceedings of NordDesign 2024}},
  editor       = {{Malmqvist, J. and Candi, M. and Saemundsson, R. and Bystrom, F. and Isaksson, O.}},
  keywords     = {{Data Driven Design, Design Automation, Systems Engineering (SE), Artificial Intelligence (AI)}},
  location     = {{Reykjavik}},
  pages        = {{617--625}},
  title        = {{{Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence}}},
  doi          = {{10.35199/NORDDESIGN2024.66}},
  year         = {{2024}},
}

@inproceedings{56277,
  abstract     = {{What constitutes learner-sensitive feedback to argumentative learner texts when it is to be generated by a computer? Learning stages are difficult to quantify. The paper provides insight into the history of research since the 1980s and a preview of what this automated feedback might look like. These questions are embedded in a research project at the Universities of Paderborn and Hannover, Germany, from which software (project name: ArgSchool) is emerging that will provide such feedback.}},
  author       = {{Kilsbach, Sebastian and Michel, Nadine}},
  booktitle    = {{Proceedings of the Tenth Conference of the International Society for the Study of Argumentation}},
  keywords     = {{AI, argumentation mining, discourse history, (automated, learner-sensitive) feedback}},
  location     = {{Leiden}},
  title        = {{{Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts}}},
  year         = {{2024}},
}

@misc{56282,
  abstract     = {{Algorithmic bias has long been recognized as a key problem affecting decision-making processes that integrate artificial intelligence (AI) technologies. The increased use of AI in making military decisions relevant to the use of force has sustained such questions about biases in these technologies and in how human users programme with and rely on data based on hierarchized socio-cultural norms, knowledges, and modes of attention.

In this post, Dr Ingvild Bode, Professor at the Center for War Studies, University of Southern Denmark, and Ishmael Bhila, PhD researcher at the “Meaningful Human Control: Between Regulation and Reflexion” project, Paderborn University, unpack the problem of algorithmic bias with reference to AI-based decision support systems (AI DSS). They examine three categories of algorithmic bias – preexisting bias, technical bias, and emergent bias – across four lifecycle stages of an AI DSS, concluding that stakeholders in the ongoing discussion about AI in the military domain should consider the impact of algorithmic bias on AI DSS more seriously.}},
  author       = {{Bhila, Ishmael and Bode, Ingvild}},
  keywords     = {{Algorithmic Bias, AI, Decision Support Systems, Autonomous Weapons Systems}},
  publisher    = {{ICRC Humanitarian Law & Policy Blog}},
  title        = {{{The problem of algorithmic bias in AI-based military decision support systems}}},
  year         = {{2024}},
}

@article{51368,
  abstract     = {{When dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, such as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Transparency, Explanation, Communication, Sociological systems theory}},
  number       = {{3}},
  pages        = {{17--27}},
  title        = {{{Does Explainability Require Transparency?}}},
  doi          = {{10.6092/ISSN.1971-8853/15804}},
  volume       = {{16}},
  year         = {{2023}},
}

@article{51369,
  abstract     = {{This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of reflection on AI and outlines the issues discussed in the contributions.}},
  author       = {{Esposito, Elena}},
  journal      = {{Sociologica}},
  keywords     = {{Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability}},
  number       = {{3}},
  pages        = {{1--4}},
  title        = {{{Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction}}},
  doi          = {{10.6092/ISSN.1971-8853/16265}},
  volume       = {{16}},
  year         = {{2023}},
}

@inproceedings{33490,
  abstract     = {{Algorithmic fairness in Information Systems (IS) is a concept that aims to mitigate systematic discrimination and bias in automated decision-making. However, previous research argued that different fairness criteria are often incompatible. In hiring, AI is used to assess and rank applicants according to their fit for vacant positions. However, various types of bias also exist for AI-based algorithms (e.g., using biased historical data). To reduce AI’s bias and thereby unfair treatment, we conducted a systematic literature review to identify suitable strategies for the context of hiring. We identified nine fundamental articles in this context and extracted four types of approaches to address unfairness in AI, namely pre-process, in-process, post-process, and feature selection. Based on our findings, we (a) derived a research agenda for future studies and (b) proposed strategies for practitioners who design and develop AIs for hiring purposes.}},
  author       = {{Rieskamp, Jonas and Hofeditz, Lennart and Mirbabaie, Milad and Stieglitz, Stefan}},
  booktitle    = {{Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)}},
  keywords     = {{fairness in AI, SLR, hiring, AI implementation, AI-based algorithms}},
  title        = {{{Approaches to Improve Fairness when Deploying AI-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research}}},
  year         = {{2023}},
}

@article{45299,
  abstract     = {{Many applications are driven by Machine Learning (ML) today. While complex ML models lead to accurate predictions, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we use a random sample of six intelligent systems and conduct an applicability check to provide external validity.}},
  author       = {{Kucklick, Jan-Peter}},
  issn         = {{1246-0125}},
  journal      = {{Journal of Decision Systems}},
  keywords     = {{Explainable AI (XAI), machine learning, interpretability, real estate appraisal, framework, taxonomy}},
  pages        = {{1--41}},
  publisher    = {{Taylor & Francis}},
  title        = {{{HIEF: a holistic interpretability and explainability framework}}},
  doi          = {{10.1080/12460125.2023.2207268}},
  year         = {{2023}},
}

@inproceedings{56477,
  abstract     = {{We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS.}},
  author       = {{Liedeker, Felix and Cimiano, Philipp}},
  keywords     = {{Explainable AI, Clinical decision support, Bayesian network, Counterfactual explanations}},
  location     = {{Lisbon}},
  title        = {{{A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations}}},
  year         = {{2023}},
}

@inproceedings{29220,
  abstract     = {{Modern services often comprise several components, such as chained virtual network functions, microservices, or machine learning functions. Providing such services requires deciding how often to instantiate each component, where to place these instances in the network, how to chain them, and how to route traffic through them.
To overcome limitations of conventional, hardwired heuristics, deep reinforcement learning (DRL) approaches for self-learning network and service management have emerged recently. These model-free DRL approaches are more flexible but typically learn tabula rasa, i.e., they disregard existing understanding of networks, services, and their coordination.

Instead, we propose FutureCoord, a novel model-based AI approach that leverages existing understanding of networks and services for more efficient and effective coordination without time-intensive training. FutureCoord combines Monte Carlo Tree Search with a stochastic traffic model. This allows FutureCoord to estimate the impact of future incoming traffic and effectively optimize long-term effects, taking fluctuating demand and Quality of Service (QoS) requirements into account. Our extensive evaluation based on real-world network topologies, services, and traffic traces indicates that FutureCoord clearly outperforms state-of-the-art model-free and model-based approaches, with up to 51% higher flow success ratios.}},
  author       = {{Werner, Stefan and Schneider, Stefan Balthasar and Karl, Holger}},
  booktitle    = {{IEEE/IFIP Network Operations and Management Symposium (NOMS)}},
  keywords     = {{network management, service management, AI, Monte Carlo Tree Search, model-based, QoS}},
  location     = {{Budapest}},
  publisher    = {{IEEE}},
  title        = {{{Use What You Know: Network and Service Coordination Beyond Certainty}}},
  year         = {{2022}},
}

@article{37155,
  abstract     = {{Artificial intelligence (AI) has moved beyond the planning phase in many organisations, and its deployment is often accompanied by uncertainties and fears of job loss among employees. It is crucial to manage employees’ attitudes towards the deployment of an AI-based technology effectively and to counteract possible resistance behaviour. We present lessons learned from an industry case where we conducted interviews with affected employees. We evaluated our results with managers across industries and found that the deployment of AI-based technologies does not differ from that of other IT, but that the change is perceived differently due to misguided expectations.}},
  author       = {{Stieglitz, Stefan and Möllmann (Frick), Nicholas R. J. and Mirbabaie, Milad and Hofeditz, Lennart and Ross, Björn}},
  issn         = {{1477-9064}},
  journal      = {{International Journal of Management Practice}},
  keywords     = {{Artificial Intelligence, Change Management, Resistance, AI-Driven Change, AI Deployment, AI Perception}},
  publisher    = {{Inderscience}},
  title        = {{{Recommendations for Managing AI-Driven Change Processes: When Expectations Meet Reality}}},
  year         = {{2021}},
}

