@article{61339,
  author        = {Protte, Marius and Djawadi, Behnud Mir},
  journal       = {Frontiers in Behavioral Economics},
  keywords      = {cheating, human-machine interaction, ambiguity, verification process, algorithm aversion, algorithm appreciation},
  pages         = {1645749},
  title         = {Human vs. Algorithmic Auditors: The Impact of Entity Type and Ambiguity on Human Dishonesty},
  doi           = {10.3389/frbhe.2025.1645749},
  volume        = {4},
  year          = {2025},
  internal-note = {NOTE(review): same DOI as entry 63909 -- apparent duplicate, consider merging and citing one key. Author surname form differs there (Mir Djawadi vs. Djawadi) -- confirm the correct split before merging.},
}

@article{63909,
  abstract      = {Introduction: Human-machine interactions become increasingly pervasive in daily life and professional contexts, motivating research to examine how human behavior changes when individuals interact with machines rather than other humans. While most of the existing literature focused on human-machine interactions with algorithmic systems in advisory roles, research on human behavior in monitoring or verification processes that are conducted by automated systems remains largely absent. This is surprising given the growing implementation of algorithmic systems in institutions, particularly in tax enforcement and financial regulation, to help monitor and identify misreports, or in online labor platforms widely implementing algorithmic control to ensure that workers deliver high service quality. Our study examines how human dishonesty changes when verification of statements that may be untrue is performed by machines vs. humans, and how ambiguity in the verification process influences dishonest behavior. Method: We design an incentivized laboratory experiment using a modified die-roll paradigm where participants privately observe a random draw and report the result, with higher reported numbers yielding greater monetary rewards. A probabilistic verification process introduces risk of identifying a lie and punishment, with treatments varying by verification entity (human vs. machine) and degree of ambiguity in the verification process (transparent vs. ambiguous). Results: Our results show that under transparent verification rules, cheating magnitude does not significantly differ between human and machine auditors. However, under ambiguous conditions, cheating magnitude is significantly higher when machines verify participants' reports, reducing the prevalence of partial cheating while leading to behavioral polarization manifested as either complete honesty or maximal overreporting. The same applies when comparing reports to a machine entity under ambiguous and transparent verification rules. Discussion: These findings emphasize the behavioral implications of algorithmic opacity in verification contexts. While machines can serve as effective auditors under transparent conditions, their black box nature combined with ambiguous verification processes may unintentionally incentivize more severe dishonesty. These insights have practical implications for designing automated oversight systems in tax audits, compliance, and workplace monitoring.},
  author        = {Protte, Marius and Mir Djawadi, Behnud},
  issn          = {2813-5296},
  journal       = {Frontiers in Behavioral Economics},
  publisher     = {Frontiers Media SA},
  title         = {Human vs. Algorithmic Auditors: The Impact of Entity Type and Ambiguity on Human Dishonesty},
  doi           = {10.3389/frbhe.2025.1645749},
  volume        = {4},
  year          = {2025},
  internal-note = {NOTE(review): same DOI as entry 61339 -- apparent duplicate, consider merging. Embedded JATS XML was stripped from the abstract; section headings kept as plain text.},
}

@incollection{48387,
  author        = {Lebedeva, Anastasia and Protte, Marius and van Straaten, Dirk and Fahr, Ren{\'e}},
  booktitle     = {Advances in Information and Communication},
  location      = {Cham},
  pages         = {178--204},
  publisher     = {Springer},
  title         = {Involvement of Domain Experts in the {AI} Training Does Not Affect Adherence -- An {AutoML} Study},
  doi           = {10.1007/978-3-031-53960-2_13},
  volume        = {919},
  year          = {2024},
  internal-note = {NOTE(review): entry type changed from @inbook to @incollection (authored chapter in an edited volume). Location changed from Berlin to Cham to match the original publisher field ``Springer, Cham'' -- confirm. Volume 919 presumably refers to a book series (e.g. Lecture Notes in Networks and Systems); consider adding a series field -- verify against the publisher page.},
}

@article{21369,
  abstract     = {Successful design of human-in-the-loop control systems requires appropriate models for human decision makers. Whilst most paradigms adopted in the control systems literature hide the (limited) decision capability of humans, in behavioral economics individual decision making and optimization processes are well-known to be affected by perceptual and behavioral biases. Our goal is to enrich control engineering with some insights from behavioral economics research through exposing such biases in control-relevant settings. This paper addresses the following two key questions: 1) How do behavioral biases affect decision making? 2) What is the role played by feedback in human-in-the-loop control systems? Our experimental framework shows how individuals behave when faced with the task of piloting an UAV under risk and uncertainty, paralleling a real-world decision-making scenario. Our findings support the notion of humans in Cyberphysical Systems underlying behavioral biases regardless of -- or even because of -- receiving immediate outcome feedback. We observe substantial shares of drone controllers to act inefficiently through either flying excessively (overconfident) or overly conservatively (underconfident). Furthermore, we observe human-controllers to self-servingly misinterpret random sequences through being subject to a ``hot hand fallacy''. We advise control engineers to mind the human component in order not to compromise technological accomplishments through human issues.},
  author       = {Protte, Marius and Fahr, Ren{\'e} and Quevedo, Daniel E.},
  journal      = {IEEE Control Systems Magazine},
  number       = {6},
  pages        = {57--76},
  publisher    = {IEEE},
  title        = {Behavioral Economics for Human-in-the-Loop Control Systems Design: Overconfidence and the Hot Hand Fallacy},
  doi          = {10.1109/MCS.2020.3019723},
  volume       = {40},
  year         = {2020},
}

@misc{21371,
  author        = {Protte, Marius},
  title         = {The Effect of Organizational Support on Whistleblowing Behavior -- An Experimental Analysis},
  year          = {2019},
  internal-note = {NOTE(review): no venue, howpublished, or url given; if this is a thesis, switch to a thesis entry type with a school field -- confirm with the author.},
}

