@article{63611,
  abstract     = {When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.},
  author       = {Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid},
  issn         = {1664-1078},
  journal      = {Frontiers in Psychology},
  keywords     = {appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification},
  publisher    = {Frontiers Media SA},
  title        = {Assessing healthy distrust in human-{AI} interaction: interpreting changes in visual attention},
  doi          = {10.3389/fpsyg.2025.1694367},
  volume       = {16},
  year         = {2026},
}

@article{59756,
  abstract     = {A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive.

In this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have created a focused study of empirical literature of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. With clarifying the concepts and summarizing the empirical studies, we aim to provide researchers, who examine user trust in AI, with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.},
  author       = {Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara},
  issn         = {1389-0417},
  journal      = {Cognitive Systems Research},
  keywords     = {XAI, Appropriate trust, Distrust, Reliance, Human-centric evaluation, Trustworthy AI},
  publisher    = {Elsevier BV},
  title        = {Trust, distrust, and appropriate reliance in {(X)AI}: A conceptual clarification of user trust and survey of its empirical evaluation},
  doi          = {10.1016/j.cogsys.2025.101357},
  year         = {2025},
}

@article{59755,
  abstract     = {Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classification as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is an equally interesting target of research.

We created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases for a phase of the experiment. We studied the task performance, trust and distrust of the participants, and tested whether an instruction to remain skeptical and review each piece of advice led to a better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-report of trust and distrust shows an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the usage of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research.},
  author       = {Peters, Tobias Martin and Scharlau, Ingrid},
  issn         = {1664-1078},
  journal      = {Frontiers in Psychology},
  keywords     = {trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification},
  publisher    = {Frontiers Media SA},
  title        = {Interacting with fallible {AI}: Is distrust helpful when receiving {AI} misclassifications?},
  doi          = {10.3389/fpsyg.2025.1574809},
  volume       = {16},
  year         = {2025},
}

@article{58650,
  abstract     = {Technical systems are characterized by increasing interdisciplinarity, complexity and networking. A product and its corresponding production systems require interdisciplinary multi-objective optimization. Sustainability and recyclability demands increase said complexity. The efficiency of previously established engineering methods is reaching its limits, which can only be overcome by systematic integration of extreme data. The aim of "hybrid decision support" is as follows: Data science and artificial intelligence should be used to supplement human capabilities in conjunction with existing heuristics, methods, modeling and simulation to increase the efficiency of product creation.},
  author       = {Gräßler, Iris and Pottebaum, Jens and Nyhuis, Peter and Stark, Rainer and Thoben, Klaus-Dieter and Wiederkehr, Petra},
  issn         = {2942-6170},
  journal      = {Industry 4.0 Science},
  keywords     = {AI, artificial intelligence, Data Science, decision support, extreme data, Künstliche Intelligenz, product creation, product development},
  number       = {1},
  publisher    = {GITO mbH Verlag},
  title        = {Hybrid Decision Support in Product Creation - Improving performance with data science and artificial intelligence},
  doi          = {10.30844/i4sd.25.1.18},
  volume       = {2025},
  year         = {2025},
}

@article{61410,
  abstract     = {Purpose: The purpose of this study is to identify, analyze, and explain the implications that could arise for service settings if AI systems develop, or are perceived to develop, consciousness – the ability to acknowledge their own existence and the capacity for positive or negative experiences.

Design/methodology/approach: This study proposes and explores four hypothetical scenarios in which conscious AI in service could manifest. We contextualize our resulting typology in the health service context and integrate extant literature on technology-enabled service, AI consciousness, and AI ethics into the narrative.

Findings: This study provides a unique theoretical contribution to service research in the form of a Type IV theory. It enables future service researchers to apprehend, explain, and predict how functionally conscious AI in service might unfold.

Originality: An increasingly prolific public discourse acknowledges that conscious AI systems may emerge. Against this backdrop, this study aims to systematically explore a question that is perhaps the most critical and timely, but also inherently speculative, in relation to AI in service research by introducing much-needed theory and terminology.

Practical implications: The ethical use of conscious AI in service could emerge as a distinct competitive advantage in the future. Achieving this outcome involves speculative yet actionable recommendations that include training, guiding, and controlling how humans engage with such systems, developing appropriate wellbeing protocols for functionally conscious AI systems, and establishing AI rights and governance frameworks.},
  author       = {Breidbach, Christoph and Ferm, Lars-Erik Casper and Maglio, Paul and Beverungen, Daniel and Wirtz, Jochen and Twigg, Alex},
  journal      = {Journal of Service Management},
  keywords     = {AI, AI consciousness, AI ethics, service systems},
  publisher    = {Emerald},
  title        = {Conscious Artificial Intelligence in Service},
  year         = {2025},
}

@article{61156,
  abstract     = {Explainability has become an important topic in computer science and artificial intelligence, leading to a subfield called Explainable Artificial Intelligence (XAI). The goal of providing or seeking explanations is to achieve (better) ‘understanding’ on the part of the explainee. However, what it means to ‘understand’ is still not clearly defined, and the concept itself is rarely the subject of scientific investigation. This conceptual article aims to present a model of forms of understanding for XAI-explanations and beyond. From an interdisciplinary perspective bringing together computer science, linguistics, sociology, philosophy and psychology, a definition of understanding and its forms, assessment, and dynamics during the process of giving everyday explanations are explored. Two types of understanding are considered as possible outcomes of explanations, namely enabledness, ‘knowing how’ to do or decide something, and comprehension, ‘knowing that’ – both in different degrees (from shallow to deep). Explanations regularly start with shallow understanding in a specific domain and can lead to deep comprehension and enabledness of the explanandum, which we see as a prerequisite for human users to gain agency. In this process, the increase of comprehension and enabledness are highly interdependent. Against the background of this systematization, special challenges of understanding in XAI are discussed.},
  author       = {Buschmeier, Hendrik and Buhl, Heike M. and Kern, Friederike and Grimminger, Angela and Beierling, Helen and Fisher, Josephine Beryl and Groß, André and Horwath, Ilona and Klowait, Nils and Lazarov, Stefan Teodorov and Lenke, Michael and Lohmer, Vivien and Rohlfing, Katharina and Scharlau, Ingrid and Singh, Amit and Terfloth, Lutz and Vollmer, Anna-Lisa and Wang, Yu and Wilmes, Annedore and Wrede, Britta},
  issn         = {1389-0417},
  journal      = {Cognitive Systems Research},
  keywords     = {understanding, explaining, explanations, explainable, AI, interdisciplinarity, comprehension, enabledness, agency},
  publisher    = {Elsevier BV},
  title        = {Forms of Understanding for {XAI}-Explanations},
  doi          = {10.1016/j.cogsys.2025.101419},
  volume       = {94},
  year         = {2025},
}

@inproceedings{63019,
  author       = {Donner, Johannes Aurelius Tamino and Schlüter, Alexander},
  booktitle    = {SDEWES Conference 2025},
  keywords     = {5GDHC, district heating, DHC, waste heat, AI-Driven},
  location     = {Dubrovnik},
  title        = {Development of an {AI}-driven decentralized control for fifth generation district heating and cooling networks},
  year         = {2025},
}

@unpublished{53793,
  abstract     = {We utilize extreme learning machines for the prediction of partial differential equations (PDEs). Our method splits the state space into multiple windows that are predicted individually using a single model. Despite requiring only few data points (in some cases, our method can learn from a single full-state snapshot), it still achieves high accuracy and can predict the flow of PDEs over long time horizons. Moreover, we show how additional symmetries can be exploited to increase sample efficiency and to enforce equivariance.},
  author       = {Harder, Hans and Peitz, Sebastian},
  keywords     = {extreme learning machines, partial differential equations, data-driven prediction, high-dimensional systems},
  note         = {Preprint},
  title        = {Predicting {PDEs} Fast and Efficiently with Equivariant Extreme Learning Machines},
  year         = {2024},
}

@inproceedings{56166,
  abstract     = {Developing Intelligent Technical Systems (ITS) involves a complex process encompassing planning, analysis, design, production, and maintenance. Model-Based Systems Engineering (MBSE) is a key methodology for systematic systems engineering. Designing models for ITS requires harmonious interaction of various elements, posing a challenge in MBSE. Leveraging Generative Artificial Intelligence, we generated a dataset for modeling, using prompt engineering on large language models. The generated artifacts can aid engineers in MBSE design or serve as synthetic training data for AI assistants.},
  author       = {Kulkarni, Pranav Jayant and Tissen, Denis and Bernijazov, Ruslan and Dumitrescu, Roman},
  booktitle    = {DS 130: Proceedings of NordDesign 2024},
  editor       = {Malmqvist, J. and Candi, M. and Saemundsson, R. and Bystrom, F. and Isaksson, O.},
  keywords     = {Data Driven Design, Design Automation, Systems Engineering (SE), Artificial Intelligence (AI)},
  location     = {Reykjavik},
  pages        = {617--625},
  title        = {Towards Automated Design: Automatically Generating Modeling Elements with Prompt Engineering and Generative Artificial Intelligence},
  doi          = {10.35199/NORDDESIGN2024.66},
  year         = {2024},
}

@inproceedings{56277,
  abstract     = {What is learner-sensitive feedback to argumentative learner texts when it is to be issued computer-based? Learning stages are difficult to quantify. The paper provides insight into the history of research since the 1980s and a preview of what this automated feedback might look like. These questions are embedded in a research project at the Universities of Paderborn and Hannover, Germany, from which a software (project name ArgSchool) emerges that will provide such feedback.},
  author       = {Kilsbach, Sebastian and Michel, Nadine},
  booktitle    = {Proceedings of the Tenth Conference of the International Society for the Study of Argumentation},
  keywords     = {AI, argumentation mining, discourse history, (automated, learner-sensitive) feedback},
  location     = {Leiden},
  title        = {Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts},
  year         = {2024},
}

@misc{56282,
  abstract     = {Algorithmic bias has long been recognized as a key problem affecting decision-making processes that integrate artificial intelligence (AI) technologies. The increased use of AI in making military decisions relevant to the use of force has sustained such questions about biases in these technologies and in how human users programme with and rely on data based on hierarchized socio-cultural norms, knowledges, and modes of attention.

In this post, Dr Ingvild Bode, Professor at the Center for War Studies, University of Southern Denmark, and Ishmael Bhila, PhD researcher at the “Meaningful Human Control: Between Regulation and Reflexion” project, Paderborn University, unpack the problem of algorithmic bias with reference to AI-based decision support systems (AI DSS). They examine three categories of algorithmic bias – preexisting bias, technical bias, and emergent bias – across four lifecycle stages of an AI DSS, concluding that stakeholders in the ongoing discussion about AI in the military domain should consider the impact of algorithmic bias on AI DSS more seriously.},
  author       = {Bhila, Ishmael and Bode, Ingvild},
  keywords     = {Algorithmic Bias, AI, Decision Support Systems, Autonomous Weapons Systems},
  publisher    = {ICRC Humanitarian Law \& Policy Blog},
  title        = {The problem of algorithmic bias in {AI}-based military decision support systems},
  year         = {2024},
}

@article{51368,
  abstract     = {Dealing with opaque algorithms, the frequent overlap between transparency and explainability produces seemingly unsolvable dilemmas, as the much-discussed trade-off between model performance and model transparency. Referring to Niklas Luhmann's notion of communication, the paper argues that explainability does not necessarily require transparency and proposes an alternative approach. Explanations as communicative processes do not imply any disclosure of thoughts or neural processes, but only reformulations that provide the partners with additional elements and enable them to understand (from their perspective) what has been done and why. Recent computational approaches aiming at post-hoc explainability reproduce what happens in communication, producing explanations of the working of algorithms that can be different from the processes of the algorithms.},
  author       = {Esposito, Elena},
  journal      = {Sociologica},
  keywords     = {Explainable AI, Transparency, Explanation, Communication, Sociological systems theory},
  number       = {3},
  pages        = {17--27},
  title        = {Does Explainability Require Transparency?},
  doi          = {10.6092/ISSN.1971-8853/15804},
  volume       = {16},
  year         = {2023},
}

@article{51369,
  abstract     = {This short introduction presents the symposium ‘Explaining Machines’. It locates the debate about Explainable AI in the history of the reflection about AI and outlines the issues discussed in the contributions.},
  author       = {Esposito, Elena},
  journal      = {Sociologica},
  keywords     = {Explainable AI, Inexplicability, Transparency, Explanation, Opacity, Contestability},
  number       = {3},
  pages        = {1--4},
  title        = {Explaining Machines: Social Management of Incomprehensible Algorithms. Introduction},
  doi          = {10.6092/ISSN.1971-8853/16265},
  volume       = {16},
  year         = {2023},
}

@inproceedings{52369,
  abstract     = {Megatrends, such as digitization or sustainability, are confronting the product management of manufacturing companies with a variety of challenges regarding the design of future products, but also the management of the actual products. To successfully position their products in the market, product managers need to gather and analyze comprehensive information about customers, developments in the products’ environment, product usage, and more. The digitization of all aspects of life is making data on these topics increasingly available – via social media, documents, or the internet of things from the products themselves. The systematic collection and analysis of these data enable the exploitation of new potentials for the adaption of existing products and the creation of the products of tomorrow. However, there are still no insights into the main concepts and cause-effect relationships in exploiting data-driven approaches for product management. Therefore, this paper aims to identify the main concepts and advantages of data-driven product management. To answer the corresponding research questions a comprehensive systematic literature review is conducted. From its results, a detailed description of the main concepts of data-driven product management is derived. Furthermore, a taxonomy for the advantages of data-driven product management is presented. The main concepts and the taxonomy allow for a deeper understanding of the topic while highlighting necessary future actions and research needs.},
  author       = {Fichtler, Timm and Grigoryan, Khoren and Koldewey, Christian and Dumitrescu, Roman},
  booktitle    = {2023 IEEE International Conference on Technology Management, Operations and Decisions (ICTMOD)},
  keywords     = {Product Lifecyle Management (PLM), Data Analytics, Data-driven Design, Engineering Management, Lifecycle Data},
  location     = {Rabat, Morocco},
  publisher    = {IEEE},
  title        = {Towards a Data-Driven Product Management – Concepts, Advantages, and Future Research},
  doi          = {10.1109/ictmod59086.2023.10438135},
  year         = {2023},
}

@inproceedings{33490,
  abstract     = {Algorithmic fairness in Information Systems (IS) is a concept that aims to mitigate systematic discrimination and bias in automated decision-making. However, previous research argued that different fairness criteria are often incompatible. In hiring, AI is used to assess and rank applicants according to their fit for vacant positions. However, various types of bias also exist for AI-based algorithms (e.g., using biased historical data). To reduce AI’s bias and thereby unfair treatment, we conducted a systematic literature review to identify suitable strategies for the context of hiring. We identified nine fundamental articles in this context and extracted four types of approaches to address unfairness in AI, namely pre-process, in-process, post-process, and feature selection. Based on our findings, we (a) derived a research agenda for future studies and (b) proposed strategies for practitioners who design and develop AIs for hiring purposes.},
  author       = {Rieskamp, Jonas and Hofeditz, Lennart and Mirbabaie, Milad and Stieglitz, Stefan},
  booktitle    = {Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)},
  keywords     = {fairness in AI, SLR, hiring, AI implementation, AI-based algorithms},
  title        = {Approaches to Improve Fairness when Deploying {AI}-based Algorithms in Hiring – Using a Systematic Literature Review to Guide Future Research},
  year         = {2023},
}

@article{45299,
  abstract     = {Many applications are driven by Machine Learning (ML) today. While complex ML models lead to an accurate prediction, their inner decision-making is obfuscated. However, especially for high-stakes decisions, interpretability and explainability of the model are necessary. Therefore, we develop a holistic interpretability and explainability framework (HIEF) to objectively describe and evaluate an intelligent system’s explainable AI (XAI) capacities. This guides data scientists to create more transparent models. To evaluate our framework, we analyse 50 real estate appraisal papers to ensure the robustness of HIEF. Additionally, we identify six typical types of intelligent systems, so-called archetypes, which range from explanatory to predictive, and demonstrate how researchers can use the framework to identify blind-spot topics in their domain. Finally, regarding comprehensiveness, we used a random sample of six intelligent systems and conducted an applicability check to provide external validity.},
  author       = {Kucklick, Jan-Peter},
  issn         = {1246-0125},
  journal      = {Journal of Decision Systems},
  keywords     = {Explainable AI (XAI), machine learning, interpretability, real estate appraisal, framework, taxonomy},
  pages        = {1--41},
  publisher    = {Taylor \& Francis},
  title        = {{HIEF}: a holistic interpretability and explainability framework},
  doi          = {10.1080/12460125.2023.2207268},
  year         = {2023},
}

@inproceedings{45793,
  abstract     = {The global megatrends of digitization and sustainability lead to new challenges for the design and management of technical products in industrial companies. Product management - as the bridge between market and company - has the task to absorb and combine the manifold requirements and make the right product-related decisions. In the process, product management is confronted with heterogeneous information, rapidly changing portfolio components, as well as increasing product, and organizational complexity. Combining and utilizing data from different sources, e.g., product usage data and social media data leads to promising potentials to improve the quality of product-related decisions. In this paper, we reinforce the need for data-driven product management as an interdisciplinary field of action. The state of data-driven product management in practice was analyzed by conducting workshops with six manufacturing companies and hosting a focus group meeting with experts from different industries. We investigate the expectations and derive requirements leading us to open research questions, a vision for data-driven product management, and a research agenda to shape future research efforts.},
  author       = {Grigoryan, Khoren and Fichtler, Timm and Schreiner, Nick and Rabe, Martin and Panzner, Melina and Kühn, Arno and Dumitrescu, Roman and Koldewey, Christian},
  booktitle    = {Procedia CIRP 33},
  keywords     = {Product Management, Data Analytics, Data-Driven Design, Product-related data, Lifecycle Data, Tool-support},
  location     = {Sydney},
  title        = {Data-Driven Product Management: A Practitioner-Driven Research Agenda},
  year         = {2023},
}

@inproceedings{56477,
  abstract     = {We describe a prototype of a Clinical Decision Support System (CDSS) that provides (counterfactual) explanations to support accurate medical diagnosis. The prototype is based on an inherently interpretable Bayesian network (BN). Our research aims to investigate which explanations are most useful for medical experts and whether co-constructing explanations can foster trust and acceptance of CDSS.},
  author       = {Liedeker, Felix and Cimiano, Philipp},
  internal-note = {NOTE(review): @inproceedings is missing the required booktitle field -- confirm the venue and add it},
  keywords     = {Explainable AI, Clinical decision support, Bayesian network, Counterfactual explanations},
  location     = {Lisbon},
  title        = {A Prototype of an Interactive Clinical Decision Support System with Counterfactual Explanations},
  year         = {2023},
}

@inproceedings{34171,
  abstract     = {State estimation when only a partial model of a considered system is available remains a major challenge in many engineering fields. This work proposes a joint, square-root unscented Kalman filter to estimate states and model uncertainties simultaneously by linear combinations of physics-motivated library functions. Using a sparsity promoting approach, a selection of those linear combinations is chosen and thus an interpretable model can be extracted. Results indicate a small estimation error compared to a traditional square-root unscented Kalman filter and exhibit the enhancement of physically meaningful models.},
  author       = {Götte, Ricarda-Samantha and Timmermann, Julia},
  booktitle    = {12th IFAC Symposium on Nonlinear Control Systems (NOLCOS 2022)},
  keywords     = {joint estimation, unscented transform, Kalman filter, sparsity, data-driven, compressed sensing},
  location     = {Canberra, Australia},
  number       = {1},
  pages        = {85--90},
  title        = {Estimating States and Model Uncertainties Jointly by a Sparsity Promoting {UKF}},
  doi          = {10.1016/j.ifacol.2023.02.015},
  volume       = {56},
  year         = {2023},
}

@inproceedings{29842,
  abstract     = {To build successful software products, developers continuously have to discover what features the users really need. This discovery can be achieved with continuous experimentation, testing different software variants with distinct user groups, and deploying the superior variant for all users. However, existing approaches do not focus on explicit modeling of variants and experiments, which offers advantages such as traceability of decisions and combinability of experiments. Therefore, our vision is the provision of model-driven continuous experimentation, which provides the developer with a framework for structuring the experimentation process. For that, we introduce the overall concept, apply it to the experimentation on component-based software architectures and point out future research questions. In particular, we show the applicability by combining feature models for modeling the software variants, users, and experiments (i.e., model-driven) with MAPE-K for the adaptation (i.e., continuous experimentation) and implementing the concept based on the component-based Angular framework.},
  author       = {Gottschalk, Sebastian and Yigitbas, Enes and Engels, Gregor},
  booktitle    = {Proceedings of the 18th International Conference on Software Architecture Companion},
  keywords     = {continuous experimentation, model-driven, component-based software architectures, self-adaptation},
  location     = {Hawaii},
  publisher    = {IEEE},
  title        = {Model-driven Continuous Experimentation on Component-based Software Architectures},
  doi          = {10.1109/ICSA-C54293.2022.00011},
  year         = {2022},
}

