@article{57892,
  author       = {Wieczorek, Oliver and Steinhardt, Isabel and Schmidt, Rebecca and Mauermeister, Sylvi and Schneijderberg, Christian},
  title        = {The {Bot Delusion}. Large language models and anticipated consequences for academics' publication and citation behavior},
  journal      = {Futures},
  volume       = {166},
  year         = {2024},
  issn         = {0016-3287},
  doi          = {10.1016/j.futures.2024.103537},
  publisher    = {Elsevier BV},
  keywords     = {Large Language Models, Matthew Effect, Academic Publishing and Citation Systems, Scientific Norms, Thought Experiment},
  abstract     = {The present paper discusses the extent to which Large Language Models (LLMs) may affect the scientific enterprise, reinforcing or mitigating existing structural inequalities expressed by the Matthew Effect and introducing a ``bot delusion'' in academia. In a theory-led thought experiment, we first focus on the academic publication and citation system and develop three scenarios of the anticipated consequences of using LLMs: reproducing content and status quo (Scenario 1), enabling content coherence evaluation (Scenario 2) and content evaluation (Scenario 3). Second, we discuss the interaction between the use of LLMs and academic (counter)norms for citation selection and their impact on the publication and citation system. Finally, we introduce communal counter-norms to capture academics' loyal citation behavior and develop three future scenarios that academia may face when LLMs are widely used in the research process, namely status quo future of science, mixed-access future, and open science future.},
}

@article{20212,
  author       = {Prester, Julian and Wagner, Gerit and Schryen, Guido and Hassan, Nik Rushdi},
  title        = {Classifying the Ideational Impact of Information Systems Review Articles: A Content-Enriched Deep Learning Approach},
  journal      = {Decision Support Systems},
  volume       = {140},
  year         = {2021},
  keywords     = {Ideational impact, citation classification, academic recommender systems, natural language processing, deep learning, cumulative tradition},
  abstract     = {Ideational impact refers to the uptake of a paper's ideas and concepts by subsequent research. It is defined in stark contrast to total citation impact, a measure predominantly used in research evaluation that assumes that all citations are equal. Understanding ideational impact is critical for evaluating research impact and understanding how scientific disciplines build a cumulative tradition. Research has only recently developed automated citation classification techniques to distinguish between different types of citations and generally does not emphasize the conceptual content of the citations and its ideational impact. To address this problem, we develop Deep Content-enriched Ideational Impact Classification (Deep-CENIC) as the first automated approach for ideational impact classification to support researchers' literature search practices. We evaluate Deep-CENIC on 1,256 papers citing 24 information systems review articles from the IT business value domain. We show that Deep-CENIC significantly outperforms state-of-the-art benchmark models. We contribute to information systems research by operationalizing the concept of ideational impact, designing a recommender system for academic papers based on deep learning techniques, and empirically exploring the ideational impact of the IT business value domain.},
  internal-note = {number was {January} (issue month, not an issue number) -- removed; confirm issue/article number and DOI against the publisher record},
}

@article{20844,
  author       = {Wagner, Gerit and Prester, Julian and Roche, Maria and Schryen, Guido and Benlian, Alexander and Par{\'e}, Guy and Templier, Mathieu},
  title        = {Which Factors Affect the Scientific Impact of Review Papers in {IS} Research? A Scientometric Study},
  journal      = {Information \& Management},
  volume       = {58},
  number       = {3},
  year         = {2021},
  keywords     = {Literature review, review papers, scientometric, scientific impact, citation analysis},
  abstract     = {Review papers are essential for knowledge development in IS. While some are cited twice a day, others accumulate single digit citations over a decade. The magnitude of these differences prompts us to analyze what distinguishes those reviews that have proven to be integral to scientific progress from those that might be considered less impactful. Our results highlight differences between reviews aimed at describing, understanding, explaining, and theory testing. Beyond the control variables, they demonstrate the importance of methodological transparency and the development of research agendas. These insights inform all stakeholders involved in the development and publication of review papers.},
}

@article{33372,
  author       = {Lee, Soo Jeung and Schneijderberg, Christian and Kim, Yangson and Steinhardt, Isabel},
  title        = {Have Academics' Citation Patterns Changed in Response to the Rise of {World University Rankings}? A Test Using First-Citation Speeds},
  journal      = {Sustainability},
  volume       = {13},
  number       = {17},
  year         = {2021},
  issn         = {2071-1050},
  doi          = {10.3390/su13179515},
  publisher    = {MDPI AG},
  keywords     = {world university rankings, citation, first-citation speed, Minerva, Studies in Higher Education},
  abstract     = {Academics may actively respond to the expectations of the academic status market, which have largely been shaped by the World University Rankings (WURs). This study empirically examines how academics' citation patterns have changed in response to the rise of an ``evaluation environment'' in academia. We regard the WURs to be a macro-level trigger for cementing a bibliometric-based evaluation environment in academia. Our analyses of citation patterns in papers published in two higher education journals explicitly considered three distinct periods: the pre-WURs (1990--2003), the period of WURs implementation (2004--2010), and the period of adaption to WURs (2011--2017). We applied the nonparametric Kaplan--Meier method to compare first-citation speeds of papers published across the three periods. We found that not only has first-citation speed become faster, but first-citation probability has also increased following the emergence of the WURs. Applying Cox proportional hazard models to first-citation probabilities, we identified journal impact factors and third-party funding as factors influencing first-citation probability, while other author- and paper-related factors showed limited effects. We also found that the general effects of different factors on first-citation speeds have changed with the emergence of the WURs. The findings expand our understanding of the citation patterns of academics in the rise of WURs and provide practical grounds for research policy as well as higher education policy.},
}

@techreport{17019,
  author       = {Schryen, Guido and Wagner, Gerit and Benlian, Alexander},
  title        = {Distinguishing Knowledge Impact from Citation Impact: A Methodology for Analysing Knowledge Impact for the Literature Review Genre},
  year         = {2020},
  keywords     = {Scientific impact, knowledge impact, content-based citation analysis, methodology},
  abstract     = {The scientific impact of research papers is multi-dimensional and can be determined quantitatively by means of citation analysis and qualitatively by means of content analysis. Accounting for the widely acknowledged limitations of pure citation analysis, we adopt a knowledge-based perspective on scientific impact to develop a methodology for content-based citation analysis which allows determining how papers have enabled knowledge development in subsequent research (knowledge impact). As knowledge development differs between research genres, we develop a new knowledge-based citation analysis methodology for the genre of standalone literature reviews (LRs). We apply the suggested methodology to the IS business value domain by manually coding 22 LRs and 1,228 citing papers (CPs) and show that the results challenge the assumption that citations indicate knowledge impact. We derive implications for distinguishing knowledge impact from citation impact in the LR genre. Finally, we develop recommendations for authors of LRs, scientific evaluation committees and editorial boards of journals how to apply and benefit from the suggested methodology, and we discuss its efficiency and automatization.},
  internal-note = {TODO: @techreport requires an institution field (and ideally type/number) -- confirm the issuing institution and add it},
}

