@comment{Cleaned auto-export: removed double-bracing (the doubly-braced author
  field was parsed as ONE corporate name), un-double-braced the title so styles
  can recase it, converted Unicode quotes/apostrophes to LaTeX for classic-BibTeX
  safety. pages = article number, inferred from the DOI suffix — verify against
  the published record.}
@article{57892,
  author       = {Wieczorek, Oliver and Steinhardt, Isabel and Schmidt, Rebecca and Mauermeister, Sylvi and Schneijderberg, Christian},
  title        = {The {Bot Delusion}. Large Language Models and Anticipated Consequences for Academics' Publication and Citation Behavior},
  journal      = {Futures},
  volume       = {166},
  pages        = {103537},
  year         = {2024},
  publisher    = {Elsevier BV},
  issn         = {0016-3287},
  doi          = {10.1016/j.futures.2024.103537},
  keywords     = {Large Language Models, Matthew Effect, Academic Publishing and Citation Systems, Scientific Norms, Thought Experiment},
  abstract     = {The present paper discusses the extent to which Large Language Models (LLMs) may affect the scientific enterprise, reinforcing or mitigating existing structural inequalities expressed by the Matthew Effect and introducing a ``bot delusion'' in academia. In a theory-led thought experiment, we first focus on the academic publication and citation system and develop three scenarios of the anticipated consequences of using LLMs: reproducing content and status quo (Scenario 1), enabling content coherence evaluation (Scenario 2) and content evaluation (Scenario 3). Second, we discuss the interaction between the use of LLMs and academic (counter)norms for citation selection and their impact on the publication and citation system. Finally, we introduce communal counter-norms to capture academics' loyal citation behavior and develop three future scenarios that academia may face when LLMs are widely used in the research process, namely status quo future of science, mixed-access future, and open science future.},
}

