@inproceedings{37312,
  abstract     = {Optimal decision making requires appropriate evaluation of advice. Recent literature reports that algorithm aversion reduces the effectiveness of predictive algorithms. However, it remains unclear how people recover from bad advice given by an otherwise good advisor. Previous work has focused on algorithm aversion at a single time point. We extend this work by examining successive decisions in a time series forecasting task using an online between-subjects experiment (N = 87). Our empirical results do not confirm algorithm aversion immediately after bad advice. The estimated effect suggests an increasing algorithm appreciation over time. Our work extends the current knowledge on algorithm aversion with insights into how weight on advice is adjusted over consecutive tasks. Since most forecasting tasks are not one-off decisions, this also has implications for practitioners.},
  author       = {Leffrang, Dirk and B{\"o}sch, Kevin and M{\"u}ller, Oliver},
  booktitle    = {Hawaii International Conference on System Sciences},
  keywords     = {Algorithm aversion, Time series, Decision making, Advice taking, Forecasting},
  title        = {Do People Recover from Algorithm Aversion? An Experimental Study of Algorithm Aversion over Time},
  year         = {2023},
}

@inproceedings{50121,
  abstract     = {Many researchers and practitioners see artificial intelligence as a game changer compared to classical statistical models. However, some software providers engage in ``AI washing'', relabeling solutions that use simple statistical models as AI systems. By contrast, research on algorithm aversion unsystematically varied the labels for advisors and treated labels such as ``artificial intelligence'' and ``statistical model'' synonymously. This study investigates the effect of individual labels on users' actual advice utilization behavior. Through two incentivized online within-subjects experiments on regression tasks, we find that labeling human advisors with labels that suggest higher expertise leads to an increase in advice-taking, even though the content of the advice remains the same. In contrast, our results do not suggest such an expert effect for advice-taking from algorithms, despite differences in self-reported perception. These findings challenge the effectiveness of framing intelligent systems as AI-based systems and have important implications for both research and practice.},
  author       = {Leffrang, Dirk},
  booktitle    = {International Conference on Information Systems},
  keywords     = {Artificial Intelligence, Algorithm Appreciation, Framing, Advice-taking, Expertise},
  location     = {Hyderabad, India},
  number       = {10},
  title        = {{AI} Washing: The Framing Effect of Labels on Algorithmic Advice Utilization},
  year         = {2023},
}

@inproceedings{50118,
  abstract     = {Despite the widespread use of machine learning algorithms, their effectiveness is limited by a phenomenon known as algorithm aversion. Recent research concluded that unobserved variables can cause algorithm aversion. However, the impact of an unobserved variable on algorithm aversion remains unclear. Previous studies focused on situations where humans had more variables available than algorithms. We extend this research by conducting an online experiment with 94 participants, systematically varying the number of observable variables to the advisor and the advisor type. Surprisingly, our results did not confirm that an unobserved variable had a negative effect on advice-taking. Instead, we found a positive impact in an algorithm appreciation scenario. This study provides new insights into the paradoxical behavior in which people weigh advice more despite having fewer variables, as they correct for the advisor's errors. Practitioners should consider this behavior when designing algorithms and account for user correction behavior.},
  author       = {Leffrang, Dirk},
  booktitle    = {Wirtschaftsinformatik Conference},
  keywords     = {Algorithm aversion, Data, Decision-making, Advice-taking, Human-Computer Interaction},
  location     = {Paderborn},
  number       = {19},
  title        = {The Broken Leg of Algorithm Appreciation: An Experimental Study on the Effect of Unobserved Variables on Advice Utilization},
  year         = {2023},
}

@inproceedings{26812,
  author       = {Leffrang, Dirk and M{\"u}ller, Oliver},
  booktitle    = {IEEE Workshop on TRust and EXpertise in Visual Analytics},
  doi          = {10.1109/TREX53765.2021.00009},
  title        = {Should {I} Follow this Model? The Effect of Uncertainty Visualization on the Acceptance of Time Series Forecasts},
  year         = {2021},
}

@misc{75,
  author       = {Leffrang, Dirk},
  publisher    = {Universit{\"a}t Paderborn},
  title        = {{Online-Bewertung} und {Preissetzung} auf {Airbnb}},
  year         = {2017},
}

