@article{58939,
  author       = {Kornowicz, Jaroslaw and Thommes, Kirsten},
  journal      = {{PLOS ONE}},
  title        = {Algorithm, Expert, or Both? {Evaluating} the Role of Feature Selection Methods on User Preferences and Reliance},
  doi          = {10.1371/journal.pone.0318874},
  year         = {2025},
}

@article{61137,
  abstract     = {Prior research shows that social norms can reduce algorithm aversion, but little is known about how such norms become established. Most accounts emphasize technological and individual determinants, yet AI adoption unfolds within organizational social contexts shaped by peers and supervisors. We ask whether the source of the norm-peers or supervisors-shapes AI usage behavior. This question is practically relevant for organizations seeking to promote effective AI adoption. We conducted an online vignette experiment, complemented by qualitative data on participants' feelings and justifications after (counter-)normative behavior. In line with the theory, counter-normative choices elicited higher regret than norm-adherent choices. On average, choosing AI increased regret compared to choosing a human. This aversion was weaker when AI use was presented as the prevailing norm, indicating a statistically significant interaction between AI use and an AI-favoring norm. Participants also attributed less blame to technology than to humans, which increased regret when AI was chosen over human expertise. Both peer and supervisor influence emerged as relevant factors, though contrary to expectations they did not significantly affect regret. Our findings suggest that regret aversion, embedded in social norms, is a central mechanism driving imitation in AI-related decision-making.},
  author       = {Kornowicz, Jaroslaw and Pape, Maurice and Thommes, Kirsten},
  journal      = {{arXiv}},
  eprint       = {2509.04241},
  eprinttype   = {arXiv},
  title        = {Would {I} Regret Being Different? {The} Influence of Social Norms on Attitudes Toward {AI} Usage},
  doi          = {10.48550/ARXIV.2509.04241},
  year         = {2025},
}

@incollection{61820,
  abstract     = {A scoring list is a sequence of simple decision models, where features are incrementally evaluated and scores of satisfied features are summed to be used for threshold-based decisions or for calculating class probabilities. In this paper, we introduce a new multi-class variant and compare it against previously introduced binary classification variants for incremental decisions, as well as multi-class variants for classical decision-making using all features. Furthermore, we introduce a new multi-class dataset to assess collaborative human-machine decision-making, which is suitable for user studies with non-expert participants. We demonstrate the usefulness of our approach by evaluating predictive performance and compared to the performance of participants without AI help.},
  author       = {Heid, Stefan and Kornowicz, Jaroslaw and Hanselle, Jonas and Thommes, Kirsten and H{\"u}llermeier, Eyke},
  booktitle    = {Communications in Computer and Information Science},
  isbn         = {9783032083265},
  issn         = {1865-0929},
  publisher    = {Springer Nature Switzerland},
  title        = {{MSL}: Multi-Class Scoring Lists for Interpretable Incremental Decision-Making},
  doi          = {10.1007/978-3-032-08327-2_6},
  year         = {2025},
}

@article{62213,
  author       = {Kornowicz, Jaroslaw},
  issn         = {1044-7318},
  journal      = {International Journal of Human--Computer Interaction},
  pages        = {1--19},
  publisher    = {Informa UK Limited},
  title        = {An Empirical Examination of the Evaluative {AI} Framework},
  doi          = {10.1080/10447318.2025.2581260},
  year         = {2025},
}

@inproceedings{57645,
  author       = {Heid, Stefan and Kornowicz, Jaroslaw and Hanselle, Jonas Manuel and H{\"u}llermeier, Eyke and Thommes, Kirsten},
  booktitle    = {Proceedings 34. Workshop Computational Intelligence},
  pages        = {233},
  title        = {{Human-AI} Co-Construction of Interpretable Predictive Models: {The} Case of Scoring Systems},
  volume       = {21},
  year         = {2024},
}

@inproceedings{55403,
  abstract     = {In this paper we consider the interactive processes by which an explainer and an explainee cooperate to produce an explanation, which we refer to as co-construction. Explainable Artificial Intelligence (XAI) is concerned with the development of intelligent systems and robots that can explain and justify their actions, decisions, recommendations, and so on. However, the cooperative construction of explanations remains a key but under-explored issue. This short paper proposes an architecture for intelligent systems that promotes a co-constructive and interactive approach to explanation generation. By outlining its basic components and their specific roles, we aim to contribute to the advancement of XAI computational frameworks that actively engage users in the explanation process.},
  author       = {Buschmeier, Hendrik and Cimiano, Philipp and Kopp, Stefan and Kornowicz, Jaroslaw and Lammert, Olesja and Matarese, Marco and Mindlin, Dimitry and Robrecht, Amelie Sophie and Vollmer, Anna-Lisa and Wagner, Petra and Wrede, Britta and Booshehri, Meisam},
  booktitle    = {Proceedings of the 2024 Workshop on Explainability Engineering},
  location     = {Lisbon, Portugal},
  pages        = {20--25},
  publisher    = {{ACM}},
  title        = {Towards a Computational Architecture for Co-Constructive Explainable Systems},
  doi          = {10.1145/3648505.3648509},
  year         = {2024},
}

@article{57461,
  abstract     = {This study empirically examines the "Evaluative AI" framework, which aims to enhance the decision-making process for AI users by transitioning from a recommendation-based approach to a hypothesis-driven one. Rather than offering direct recommendations, this framework presents users pro and con evidence for hypotheses to support more informed decisions. However, findings from the current behavioral experiment reveal no significant improvement in decision-making performance and limited user engagement with the evidence provided, resulting in cognitive processes similar to those observed in traditional AI systems. Despite these results, the framework still holds promise for further exploration in future research.},
  author       = {Kornowicz, Jaroslaw},
  journal      = {{arXiv}},
  eprint       = {2411.08583},
  eprinttype   = {arXiv},
  title        = {An Empirical Examination of the Evaluative {AI} Framework},
  doi          = {10.48550/ARXIV.2411.08583},
  year         = {2024},
}

@inproceedings{48285,
  author       = {Lebedeva, Anastasia and Kornowicz, Jaroslaw and Lammert, Olesja and Papenkordt, J{\"o}rg},
  booktitle    = {Artificial Intelligence in {HCI}},
  title        = {The Role of Response Time for Algorithm Aversion in Fast and Slow Thinking Tasks},
  doi          = {10.1007/978-3-031-35891-3_9},
  year         = {2023},
}

@inproceedings{47953,
  author       = {Kornowicz, Jaroslaw and Thommes, Kirsten},
  booktitle    = {Artificial Intelligence in {HCI}},
  isbn         = {9783031358906},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Aggregating Human Domain Knowledge for Feature Ranking},
  doi          = {10.1007/978-3-031-35891-3_7},
  year         = {2023},
}

@inproceedings{51209,
  author       = {Hanselle, Jonas Manuel and Kornowicz, Jaroslaw and Heid, Stefan and Thommes, Kirsten and H{\"u}llermeier, Eyke},
  booktitle    = {{LWDA}'23: Learning, Knowledge, Data, Analysis},
  editor       = {Leyer, M. and Wichmann, J.},
  issn         = {1613-0073},
  title        = {Comparing Humans and Algorithms in Feature Ranking: {A} Case-Study in the Medical Domain},
  year         = {2023},
}

