@article{63611,
  abstract     = {When humans interact with artificial intelligence (AI), one desideratum is appropriate trust. Typically, appropriate trust encompasses that humans trust AI except for instances in which they either explicitly notice AI errors or are suspicious that errors could be present. So far, appropriate trust or related notions have mainly been investigated by assessing trust and reliance. In this contribution, we argue that these assessments are insufficient to measure the complex aim of appropriate trust and the related notion of healthy distrust. We introduce and test the perspective of covert visual attention as an additional indicator for appropriate trust and draw conceptual connections to the notion of healthy distrust. To test the validity of our conceptualization, we formalize visual attention using the Theory of Visual Attention and measure its properties that are potentially relevant to appropriate trust and healthy distrust in an image classification task. Based on temporal-order judgment performance, we estimate participants' attentional capacity and attentional weight toward correct and incorrect mock-up AI classifications. We observe that misclassifications reduce attentional capacity compared to correct classifications. However, our results do not indicate that this reduction is beneficial for a subsequent judgment of the classifications. The attentional weighting is not affected by the classifications' correctness but by the difficulty of categorizing the stimuli themselves. We discuss these results, their implications, and the limited potential for using visual attention as an indicator of appropriate trust and healthy distrust.},
  author       = {Peters, Tobias Martin and Biermeier, Kai and Scharlau, Ingrid},
  issn         = {1664-1078},
  journal      = {Frontiers in Psychology},
  keywords     = {appropriate trust, healthy distrust, visual attention, Theory of Visual Attention, human-AI interaction, Bayesian cognitive model, image classification},
  publisher    = {Frontiers Media SA},
  title        = {Assessing healthy distrust in human-{AI} interaction: interpreting changes in visual attention},
  doi          = {10.3389/fpsyg.2025.1694367},
  volume       = {16},
  year         = {2026},
}

@incollection{65310,
  abstract     = {Trust between client and consultant is perhaps the most important asset in consulting, as this is a highly intangible knowledge-intensive business that concerns issues of outstanding strategic and operational importance for the customers. Clients who have not worked with a particular consultancy face considerable risk when they place an order while lacking reliable information about the service quality they can expect. There is a strong link between trust and reputation, as the positive reputation of a consultancy can act as a substitute for a new client’s missing individual experience with the provider, fostering trust in the service quality. Thus, creating, maintaining, and demonstrating a good reputation is of significant importance for consultancies in a very competitive industry. To facilitate trustworthy signals, we design and implement a novel reputation mechanism that carries a monetary weight stored on a blockchain network as an immutable, decentralized, and transparent ledger. Based on an implementation in the Ethereum network and subsequent evaluation, we conclude that the reputation mechanism can contribute to leveling information asymmetry and reducing risk while increasing reputation and trust. The mechanism lends itself to being used in other business-to-business scenarios that suffer from similar information asymmetries.},
  author       = {Hemmrich, Simon and Nissen, Volker},
  booktitle    = {Advanced Studies in Consulting Research and Digitalization -- A Scientific Update on the Digital Transformation of the Consulting Industry},
  editor       = {Nissen, Volker},
  keywords     = {Reputation Systems, Consulting, Design Science Invention, Incentive, Blockchain, Monetary ratings, building trust, reduce information asymmetry consulting, B2B reputation system, consulting risk reduction, supplier evaluation system},
  publisher    = {Springer},
  title        = {A blockchain-based reputation system for consulting},
  year         = {2026},
}

@article{59756,
  abstract     = {A current concern in the field of Artificial Intelligence (AI) is to ensure the trustworthiness of AI systems. The development of explainability methods is one prominent way to address this, which has often resulted in the assumption that the use of explainability will lead to an increase in the trust of users and wider society. However, the dynamics between explainability and trust are not well established and empirical investigations of their relation remain mixed or inconclusive. In this paper we provide a detailed description of the concepts of user trust and distrust in AI and their relation to appropriate reliance. For that we draw from the fields of machine learning, human–computer interaction, and the social sciences. Based on these insights, we have created a focused study of empirical literature of existing empirical studies that investigate the effects of AI systems and XAI methods on user (dis)trust, in order to substantiate our conceptualization of trust, distrust, and reliance. With respect to our conceptual understanding we identify gaps in existing empirical work. With clarifying the concepts and summarizing the empirical studies, we aim to provide researchers, who examine user trust in AI, with an improved starting point for developing user studies to measure and evaluate the user’s attitude towards and reliance on AI systems.},
  author       = {Visser, Roel and Peters, Tobias Martin and Scharlau, Ingrid and Hammer, Barbara},
  issn         = {1389-0417},
  journal      = {Cognitive Systems Research},
  keywords     = {XAI, Appropriate trust, Distrust, Reliance, Human-centric evaluation, Trustworthy AI},
  publisher    = {Elsevier BV},
  title        = {Trust, distrust, and appropriate reliance in {(X)AI}: A conceptual clarification of user trust and survey of its empirical evaluation},
  doi          = {10.1016/j.cogsys.2025.101357},
  year         = {2025},
}

@article{59755,
  abstract     = {Due to the application of Artificial Intelligence (AI) in high-risk domains like law or medicine, trustworthy AI and trust in AI are of increasing scientific and public relevance. A typical conception, for example in the context of medical diagnosis, is that a knowledgeable user receives AI-generated classification as advice. Research to improve such interactions often aims to foster the user’s trust, which in turn should improve the combined human-AI performance. Given that AI models can err, we argue that the possibility to critically review, thus to distrust, an AI decision is an equally interesting target of research. We created two image classification scenarios in which the participants received mock-up AI advice. The quality of the advice decreases for a phase of the experiment. We studied the task performance, trust and distrust of the participants, and tested whether an instruction to remain skeptical and review each piece of advice led to a better performance compared to a neutral condition. Our results indicate that this instruction does not improve but rather worsens the participants’ performance. Repeated single-item self-report of trust and distrust shows an increase in trust and a decrease in distrust after the drop in the AI’s classification quality, with no difference between the two instructions. Furthermore, via a Bayesian Signal Detection Theory analysis, we provide a procedure to assess appropriate reliance in detail, by quantifying whether the problems of under- and over-reliance have been mitigated. We discuss implications of our results for the usage of disclaimers before interacting with AI, as prominently used in current LLM-based chatbots, and for trust and distrust research.},
  author       = {Peters, Tobias Martin and Scharlau, Ingrid},
  issn         = {1664-1078},
  journal      = {Frontiers in Psychology},
  keywords     = {trust in AI, trust, distrust, human-AI interaction, Signal Detection Theory, Bayesian parameter estimation, image classification},
  publisher    = {Frontiers Media SA},
  title        = {Interacting with fallible {AI}: Is distrust helpful when receiving {AI} misclassifications?},
  doi          = {10.3389/fpsyg.2025.1574809},
  volume       = {16},
  year         = {2025},
}

@article{61123,
  abstract     = {Knowledge graphs are used by a growing number of applications to represent structured data. Hence, evaluating the veracity of assertions in knowledge graphs—dubbed fact checking—is currently a challenge of growing importance. However, manual fact checking is commonly impractical due to the sheer size of knowledge graphs. This paper is a systematic survey of recent works on automatic fact checking with a focus on knowledge graphs. We present recent fact-checking approaches, the varied sources they use as background knowledge, and the features they rely upon. Finally, we draw conclusions pertaining to possible future research directions in fact checking knowledge graphs.},
  author       = {Qudus, Umair and Röder, Michael and Saleem, Muhammad and Ngonga Ngomo, Axel-Cyrille},
  issn         = {0360-0300},
  journal      = {ACM Computing Surveys},
  keywords     = {fact checking, knowledge graphs, fact-checkers, check worthiness, evidence retrieval, trust, veracity},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Fact Checking Knowledge Graphs -- A Survey},
  doi          = {10.1145/3749838},
  volume       = {58},
  year         = {2025},
}

@article{60050,
  abstract     = {Reputation systems to rate companies’ performances remain largely unexplored in research and are scarcely used in business-to-business (B2B) practice. Such systems are essential for businesses seeking trustworthy partners, as they help reduce information asymmetry, lower buyers’ transaction risks, and allow high-quality service providers to justify premium pricing. Unlike traditional review-based systems in the business-to-consumer (B2C) context, we propose a B2B reputation mechanism in which buyers commit to a rating payment before a transaction. Once the buyer finalizes the rating, this payment is executed and recorded on a blockchain as an immutable, secure ledger. Our system mimics natural trust-building mechanisms with ratings that are (1) monetary-based, (2) stake-based, (3) non-aggregated, (4) involve counter-ratings, (5) selectively sellable, (6) individually comparable, (7) stored on a blockchain, (8) and monitored by a third instance. This system provides a novel approach to fostering trust in B2B transactions by reducing information asymmetry and transaction risk. We illustrate the mechanism’s application in the consulting sector. Our analysis has identified 23 institutional trust and distrust dimensions that promote establishing institutional trust through the proposed mechanism. Qualitative interviews suggest that, while complex and challenging to apply, this mechanism can foster trust in B2B transactions. Given the low maturity in the application domain—rating professional business services with business reputation systems—and solution domain—using monetary stakes for ratings, this system stands as a potential invention.},
  author       = {Hemmrich, Simon and Nissen, Volker and Beverungen, Daniel and Pauls, Josias Daniel Miño},
  journal      = {Information Systems and e-Business Management},
  keywords     = {Information asymmetry, Trust, Reputation system, Monetary rating, Blockchain, Consulting},
  number       = {1},
  publisher    = {Springer},
  title        = {Blockchain-based reputation systems for business-to-business services: designing a reputation mechanism to reduce information asymmetry in professional consulting},
  doi          = {10.1007/s10257-025-00702-9},
  volume       = {23},
  year         = {2025},
}

@inproceedings{49785,
  abstract     = {Reputation is indispensable for online business since it supports customers in their buying decisions and allows sellers to justify premium prices. While IS research has investigated reputation systems mainly as review systems on online platforms for business-to-consumer (B2C) transactions, no proper solutions have been developed for business-to-business (B2B) transactions yet. We use blockchain technology to propose a new class of reputation systems that apply ratings as voluntary bonus payments: Before a transaction is performed, customers commit to pay a bonus that is granted if a service provider has performed a service properly. As opposed to rival reputation systems that build on cumulated ratings or reviews, our system enables monetized reputation mechanisms that are inextricably linked with online transactions. We expect this system class to provide more trustworthy ratings, which might reduce agency costs and serve quality providers to establish a reputation towards new customers, building on second-order trust.},
  author       = {Hemmrich, Simon},
  booktitle    = {Proceedings of the 31st European Conference on Information Systems (ECIS 2023)},
  keywords     = {Trust, Risk, Reputation System, Blockchain Technology, Business Reputation System},
  location     = {Kristiansand},
  title        = {Business Reputation Systems based on Blockchain Technology—A Risky Advance},
  year         = {2023},
}

@article{29048,
  abstract     = {We study the bargaining behavior between auditor and auditee in a tax setting and scrutinize the effect of interpersonal trust and trust in government on both parties’ concessions. We find evidence that both kinds of trust affect the concessionary behavior, albeit in different ways. While trust in government affects concessionary behavior in line with intuitive predictions, we find that interpersonal trust only affects tax auditors. For high interpersonal trust, the alleviating effect of high trust in government on tax auditors’ concessions is less pronounced. Our findings help tax authorities to shape programs to enhance compliance in an atmosphere of trust.},
  author       = {Eberhartinger, Eva and Speitmann, Raffael and Sureth-Sloane, Caren and Wu, Yuchen},
  journal      = {FinanzArchiv / European Journal of Public Finance},
  keywords     = {Behavioral Taxation, Concessionary Behavior, Interpersonal Trust, Tax Audit, Trust in Government},
  number       = {1--2},
  pages        = {112--155},
  title        = {How Does Trust Affect Concessionary Behavior in Tax Bargaining?},
  volume       = {78},
  year         = {2022},
}

@inproceedings{9613,
  abstract     = {The ability to openly evaluate products, locations and services is an achievement of the Web 2.0. It has never been easier to inform oneself about the quality of products or services and possible alternatives. Forming one’s own opinion based on the impressions of other people can lead to better experiences. However, this presupposes trust in one’s fellows as well as in the quality of the review platforms. In previous work on physician reviews and the corresponding websites, it was observed that there occurs faulty behavior by some reviewers and there were noteworthy differences in the technical implementation of the portals and in the efforts of site operators to maintain high quality reviews. These experiences raise new questions regarding what trust means on review platforms, how trust arises and how easily it can be destroyed.},
  author       = {Kersting, Joschka and Bäumer, Frederik Simon and Geierhos, Michaela},
  booktitle    = {Proceedings of the 4th International Conference on Internet of Things, Big Data and Security},
  editor       = {Ramachandran, Muthu and Walters, Robert and Wills, Gary and Méndez Muñoz, Víctor and Chang, Victor},
  isbn         = {978-989-758-369-8},
  keywords     = {Trust, Physician Reviews, Network Analysis},
  location     = {Heraklion, Greece},
  pages        = {147--155},
  publisher    = {SCITEPRESS},
  title        = {In Reviews We Trust: But Should We? Experiences with Physician Review Websites},
  year         = {2019},
}

@inproceedings{5598,
  abstract     = {Emerging digital environments and infrastructures, such as distributed services and computing services, have generated new options of communication, information sharing, and resource utilization in past years. Different distributed trust concepts are applied to increase trust in such systems. However, these concepts yield rather complex architectures which make it difficult to determine which component or system needs to be trusted. This paper presents a novel trust measurement method for distributed systems which enables the identification of weak points in the overall system architecture. The measurement method includes the specification of a formal trust language and its representation by means of propositional logic formulas. The applicability of the proposed concepts is demonstrated by conducting a case study on the Internet voting system that was used in the 2007 parliamentary elections in Estonia.},
  author       = {Volkamer, Melanie and Schryen, Guido},
  booktitle    = {Proceedings of the 23rd Bled eConference},
  keywords     = {distributed trust concepts, measuring etrust, Internet voting},
  title        = {Measuring {eTrust} in distributed systems -- General Concept and Application to {Internet} Voting},
  year         = {2010},
}

