@article{65182,
  abstract     = {{<jats:p>The aggregation of rating metrics in reputation systems is crucial for mitigating information overload by condensing customer rating distributions into singular valence scores. While platforms typically employ technical aggregation functions, such as the arithmetic mean to capture product quality, it remains unclear whether these functions align with customers' innate aggregation patterns. To address this knowledge gap, we designed a controlled economic decision experiment to elicit customers' aggregation principles by analyzing their product ranking decisions and contrasting these with various reference functions. Our findings indicate that, on average, customers aggregate rating information in accordance with the arithmetic mean. However, a granular analysis at the individual level reveals significant heterogeneity in aggregation behavior, with a substantial cluster exhibiting binary patterns that focus equally on negative (1-2 star) and positive (4-5 star) ratings. Additional clusters concentrate on negative feedback, particularly 1-star ratings or 1-2 star ratings collectively. Notably, these inherent aggregation patterns exhibit stability across variations in numerical information presentation and are not significantly influenced by individual characteristics, such as online shopping experience, risk attitudes, or demographics. These findings suggest that while the arithmetic mean captures average consumer behavior, platforms could benefit from offering customizable aggregation options to better cater to diverse user preferences for processing rating distributions. By doing so, platforms can enhance the effectiveness of their reputation systems and improve the overall quality of decision-making for consumers.</jats:p>}},
  author       = {{van Straaten, Dirk and Mir Djawadi, Behnud and Melnikov, Vitalik and Hüllermeier, Eyke and Fahr, René}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Aggregation Processes in Customer Rating Systems - Insights from an Economic Decision Experiment}}},
  doi          = {{10.2139/ssrn.6201258}},
  year         = {{2026}},
}

@article{65181,
  abstract     = {{<jats:p>In many Western societies, mass immigration has been one of the most divisive policy issues in recent years. Seemingly moderate inflows of migrants can have substantial demographic consequences in the long run, due to (1) higher fertility of the migrant population, (2) its younger age distribution, and (3) the possibility of family reunification. Yet, demography hardly appears in the policy debate, even in media outlets that are critical of mass immigration. This may indicate that the mechanics of population dynamics are not widely understood. We design a laboratory experiment in which we confront subjects with 30 different migration scenarios. Subjects have to decide when to stop a given inflow of migrants to achieve a target share of migrants after 60 years. In line with all our pre-registered hypotheses, in scenarios that contain elements of usual mass immigration the growth of the migrant population is systematically underestimated. This bias is even stronger in scenarios that closely resemble the German situation since the opening of the borders during the 2015 refugee crisis.</jats:p>}},
  author       = {{Abbink, Klaus and Mir Djawadi, Behnud}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Migration and Long-Term Demographic Change: Can We Control the Numbers?}}},
  doi          = {{10.2139/ssrn.6343618}},
  year         = {{2026}},
}

@article{63910,
  author       = {{Mir Djawadi, Behnud}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Dishonesty of Parents and Children – Evidence from a Field Experiment}}},
  doi          = {{10.2139/ssrn.6121987}},
  year         = {{2026}},
}

@article{63909,
  abstract     = {{<jats:sec><jats:title>Introduction</jats:title><jats:p>Human-machine interactions become increasingly pervasive in daily life and professional contexts, motivating research to examine how human behavior changes when individuals interact with machines rather than other humans. While most of the existing literature focused on human-machine interactions with algorithmic systems in advisory roles, research on human behavior in monitoring or verification processes that are conducted by automated systems remains largely absent. This is surprising given the growing implementation of algorithmic systems in institutions, particularly in tax enforcement and financial regulation, to help monitor and identify misreports, or in online labor platforms widely implementing algorithmic control to ensure that workers deliver high service quality. Our study examines how human dishonesty changes when verification of statements that may be untrue is performed by machines vs. humans, and how ambiguity in the verification process influences dishonest behavior.</jats:p></jats:sec><jats:sec><jats:title>Method</jats:title><jats:p>We design an incentivized laboratory experiment using a modified die-roll paradigm where participants privately observe a random draw and report the result, with higher reported numbers yielding greater monetary rewards. A probabilistic verification process introduces risk of identifying a lie and punishment, with treatments varying by verification entity (human vs. machine) and degree of ambiguity in the verification process (transparent vs. ambiguous).</jats:p></jats:sec><jats:sec><jats:title>Results</jats:title><jats:p>Our results show that under transparent verification rules, cheating magnitude does not significantly differ between human and machine auditors. 
However, under ambiguous conditions, cheating magnitude is significantly higher when machines verify participants' reports, reducing the prevalence of partial cheating while leading to behavioral polarization manifested as either complete honesty or maximal overreporting. The same applies when comparing reports to a machine entity under ambiguous and transparent verification rules.</jats:p></jats:sec><jats:sec><jats:title>Discussion</jats:title><jats:p>These findings emphasize the behavioral implications of algorithmic opacity in verification contexts. While machines can serve as effective auditors under transparent conditions, their black box nature combined with ambiguous verification processes may unintentionally incentivize more severe dishonesty. These insights have practical implications for designing automated oversight systems in tax audits, compliance, and workplace monitoring.</jats:p></jats:sec>}},
  author       = {{Protte, Marius and Mir Djawadi, Behnud}},
  issn         = {{2813-5296}},
  journal      = {{Frontiers in Behavioral Economics}},
  publisher    = {{Frontiers Media SA}},
  title        = {{{Human vs. algorithmic auditors: the impact of entity type and ambiguity on human dishonesty}}},
  doi          = {{10.3389/frbhe.2025.1645749}},
  volume       = {{4}},
  year         = {{2025}},
}

@article{63908,
  author       = {{Mir Djawadi, Behnud and Plaß, Sabrina and Loer, Sabrina}},
  issn         = {{0014-2921}},
  journal      = {{European Economic Review}},
  publisher    = {{Elsevier BV}},
  title        = {{{“I don’t believe that you believe what I believe”: an experiment on misperceptions of social norms and whistleblowing}}},
  doi          = {{10.1016/j.euroecorev.2025.105189}},
  volume       = {{180}},
  year         = {{2025}},
}

@article{63911,
  author       = {{Mir Djawadi, Behnud and Plaß, Sabrina and Loer, Sabrina}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Social Information Interventions under Competing Norms: Evidence from a Whistleblowing Experiment}}},
  doi          = {{10.2139/ssrn.5345248}},
  year         = {{2025}},
}

@article{63912,
  author       = {{Mir Djawadi, Behnud and Wester, Lisa}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Social Interaction and Feedback-Giving Behavior in the Sharing Economy - An Experimental Analysis}}},
  doi          = {{10.2139/ssrn.5345248}},
  internal-note = {{NOTE(review): DOI is identical to that of entry 63911, which is a different paper -- verify which entry has the correct DOI}},
  year         = {{2025}},
}

@inproceedings{57290,
  author       = {{Kürpick, Christian and Schreiner, Nick and Krauß-Kodytek, Laura and Plaß, Sabrina and Scholz, Thorben and Kühn, Arno}},
  location     = {{Riga Technical University}},
  internal-note = {{NOTE(review): @inproceedings is missing the required booktitle field, and location holds an institution rather than a city -- verify proceedings title and venue}},
  pages        = {{1--6}},
  title        = {{{Capabilities for the Strategic Alignment of Sustainability and Digitalization in Manufacturing: Insights from Theory and Practice}}},
  year         = {{2024}},
}

@incollection{48387,
  author       = {{Lebedeva, Anastasia and Protte, Marius and van Straaten, Dirk and Fahr, René}},
  booktitle    = {{Advances in Information and Communication}},
  location     = {{Berlin}},
  pages        = {{178--204}},
  publisher    = {{Springer, Cham}},
  title        = {{{Involvement of domain experts in the AI training does not affect adherence – An AutoML study}}},
  doi          = {{10.1007/978-3-031-53960-2_13}},
  volume       = {{919}},
  year         = {{2024}},
}

@article{30341,
  author       = {{Hoyer, Britta and van Straaten, Dirk}},
  issn         = {{2214-8043}},
  journal      = {{Journal of Behavioral and Experimental Economics}},
  keywords     = {{General Social Sciences, Economics and Econometrics, Applied Psychology}},
  pages        = {{101869}},
  publisher    = {{Elsevier BV}},
  title        = {{{Anonymity and Self-Expression in Online Rating Systems - An Experimental Analysis}}},
  doi          = {{10.1016/j.socec.2022.101869}},
  volume       = {{98}},
  year         = {{2022}},
}

@article{21369,
  abstract     = {{Successful design of human-in-the-loop control systems requires appropriate models for human decision makers. Whilst most paradigms adopted in the control systems literature hide the (limited) decision capability of humans, in behavioral economics individual decision making and optimization processes are well-known to be affected by perceptual and behavioral biases. Our goal is to enrich control engineering with some insights from behavioral economics research through exposing such biases in control-relevant settings.
This paper addresses the following two key questions:
1) How do behavioral biases affect decision making?
2) What is the role played by feedback in human-in-the-loop control systems?
Our experimental framework shows how individuals behave when faced with the task of piloting an UAV under risk and uncertainty, paralleling a real-world decision-making scenario. Our findings support the notion of humans in Cyberphysical Systems underlying behavioral biases regardless of – or even because of – receiving immediate outcome feedback. We observe substantial shares of drone controllers to act inefficiently through either flying excessively (overconfident) or overly conservatively (underconfident). Furthermore, we observe human-controllers to self-servingly misinterpret random sequences through being subject to a “hot hand fallacy”. We advise control engineers to mind the human component in order not to compromise technological accomplishments through human issues.}},
  author       = {{Protte, Marius and Fahr, René and Quevedo, Daniel E.}},
  journal      = {{IEEE Control Systems Magazine}},
  number       = {{6}},
  pages        = {{57--76}},
  publisher    = {{IEEE}},
  title        = {{{Behavioral Economics for Human-in-the-loop Control Systems Design: Overconfidence and the hot hand fallacy}}},
  doi          = {{10.1109/MCS.2020.3019723}},
  volume       = {{40}},
  year         = {{2020}},
}

@article{80,
  abstract     = {{Models on network formation have often been extended to include the potential of network disruption in recent years. Whereas the theoretical research on network formation under the threat of disruption has thus gained prominence, hardly any experimental research exists so far. In this paper, we therefore experimentally study the emergence of networks including the aspect of a known external threat by relating theoretical predictions by Dziubiński and Goyal (2013) to actual observed behaviour. We deal with the question if subjects in the role of a strategic Designer are able to form safe networks for least costs while facing a strategic Adversary who is going to attack their networks. Varying the costs for protecting nodes, we designed and tested two treatments with different predictions for the equilibrium network and investigated whether one of the least cost equilibrium networks was more likely to be reached. Furthermore, the influence of the subjects’ farsightedness on their decision-making process was elicited and analysed.

We find that while subjects are able to build safe networks in both treatments, equilibrium networks are only built in one of the two treatments. In the other treatment, predominantly safe networks are built but they are not for least costs. Additionally, we find that farsightedness –as measured in our experiment– has no influence on whether subjects are able to build safe or least cost equilibrium networks. Two robustness settings with a reduced external threat or more liberties to modify the initial networks qualitatively confirm our results. Overall, in this experiment observed behaviour is only partially in line with the theoretical predictions by Dziubiński and Goyal (2013).}},
  author       = {{Endres, Angelika Elfriede and Recker, Sonja and Mir Djawadi, Behnud and Hoyer, Britta}},
  journal      = {{Journal of Economic Behavior \& Organization}},
  pages        = {{708--734}},
  title        = {{{Network Formation and Disruption - An Experiment: Are equilibrium networks too complex?}}},
  doi          = {{10.1016/j.jebo.2018.11.004}},
  volume       = {{157}},
  year         = {{2019}},
}

@article{63913,
  author       = {{Mir Djawadi, Behnud and Nieken, Petra}},
  issn         = {{1556-5068}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Labor Market Chances of Whistleblowers - Potential Drivers of Discrimination}}},
  doi          = {{10.2139/ssrn.3481126}},
  year         = {{2019}},
}

@inproceedings{115,
  abstract     = {{Whenever customers have to decide between different instances of the same product, they are interested in buying the best product. In contrast, companies are interested in reducing the construction effort (and usually as a consequence thereof, the quality) to gain profit. The described setting is widely known as opposed preferences in quality of the product and also applies to the context of service-oriented computing. In general, service-oriented computing emphasizes the construction of large software systems out of existing services, where services are small and self-contained pieces of software that adhere to a specified interface. Several implementations of the same interface are considered as several instances of the same service. Thereby, customers are interested in buying the best service implementation for their service composition wrt. to metrics, such as costs, energy, memory consumption, or execution time. One way to ensure the service quality is to employ certificates, which can come in different kinds: Technical certificates proving correctness can be automatically constructed by the service provider and again be automatically checked by the user. Digital certificates allow proof of the integrity of a product. Other certificates might be rolled out if service providers follow a good software construction principle, which is checked in annual audits. Whereas all of these certificates are handled differently in service markets, what they have in common is that they influence the buying decisions of customers. In this paper, we review state-of-the-art developments in certification with respect to service-oriented computing. We not only discuss how certificates are constructed and handled in service-oriented computing but also review the effects of certificates on the market from an economic perspective.}},
  author       = {{Jakobs, Marie-Christine and Krämer, Julia and van Straaten, Dirk and Lettmann, Theodor}},
  booktitle    = {{The Ninth International Conferences on Advanced Service Computing (SERVICE COMPUTATION)}},
  editor       = {{De Barros, Marcelo and Klink, Janusz and Uhl, Tadeus and Prinz, Thomas}},
  pages        = {{7--12}},
  title        = {{{Certification Matters for Service Markets}}},
  year         = {{2017}},
}

@techreport{1057,
  author       = {{Sürücü, Oktay and Mir Djawadi, Behnud and Brangewitz, Sonja}},
  institution  = {{Universität Paderborn}},
  title        = {{{Asymmetric Dominance Effect with Multiple Decoys for Low- and High-Variance Lotteries}}},
  year         = {{2017}},
}

@article{4870,
  author       = {{Fahr, René}},
  journal      = {{Personnel Quarterly}},
  volume       = {{4}},
  title        = {{{Verantwortung macht Sinn: Corporate Social Responsibility}}},
  year         = {{2016}},
}

@techreport{63915,
  author       = {{Bartuli, Jenny and Mir Djawadi, Behnud and Fahr, René}},
  title        = {{{Business Ethics in Organizations: An Experimental Examination of Whistleblowing and Personality}}},
  number       = {{10190}},
  internal-note = {{NOTE(review): @techreport is missing the required institution field; number 10190 suggests an IZA Discussion Paper -- verify series and institution}},
  year         = {{2016}},
}

@article{63914,
  author       = {{Mir Djawadi, Behnud and Fahr, René and Turk, Florian}},
  issn         = {{1556-5068}},
  journal      = {{SSRN Electronic Journal}},
  publisher    = {{Elsevier BV}},
  title        = {{{Tailored Financial Incentives to Fight Medical Non-Persistence in Therapeutic Treatment: A Behavioral Economic Engineering Approach}}},
  doi          = {{10.2139/ssrn.2713058}},
  year         = {{2016}},
}

@article{228,
  abstract     = {{We investigate the pervasiveness of lying in professional contexts such as insurance fraud, tax evasion and untrue job applications. We argue that lying in professional contexts share three characterizing features: (1) the gain from the dishonest behavior is uncertain, (2) the harm that lying may cause to the other party is only indirect and (3) lies are more indirect lies by action or written statements. Conducted as a field experiment with a heterogenous group of participants during a University ‘‘Open House Day’’, our ‘‘gumball-machineexperiment’’ provides field evidence on how preferences for lying are shaped in situations typically found in professional contexts which we consider to be particularly prone to lying behavior compared to other contexts. As a key innovation, our experimental design allows measuring exact levels of cheating behavior under anonymous conditions. We find clean evidence that cheating is prevalent across all sub groups and that more than 32% of the population cheats for their own gain. However, an analysis of the cheating rates with respect to highest educational degree and professional status reveals that students cheat more than non-students. This finding warrants a careful interpretation of generalizing laboratory findings with student subjects about the prevalence of cheating in the population.}},
  author       = {{Fahr, René and Mir Djawadi, Behnud}},
  journal      = {{Journal of Economic Psychology}},
  pages        = {{48--59}},
  volume       = {{48}},
  publisher    = {{Elsevier}},
  title        = {{{“…and they are really lying”: Clean Evidence on the Pervasiveness of Cheating in Professional Contexts from a Field Experiment.}}},
  doi          = {{10.1016/j.joep.2015.03.002}},
  year         = {{2015}},
}

@article{4871,
  internal-note = {{NOTE(review): apparent duplicate of entry 228 (same journal, pages, and year) -- consider merging the two entries}},
  author       = {{Mir Djawadi, Behnud and Fahr, René}},
  journal      = {{Journal of Economic Psychology}},
  pages        = {{48--59}},
  title        = {{{"...and they are really lying: Clean Evidence on the Pervasiveness of Cheating in Professional Contexts from a Field Experiment"}}},
  volume       = {{48}},
  year         = {{2015}},
}

