@article{58076,
  abstract     = {This paper presents the concept of Information Circularity Assistance, which provides decision support in the early stages of product creation for Circular Economy. Engineers in strategic product planning need to proactively predict the quantity, quality, and timing of secondary materials and returned components. For example, products with high recycled content will only be economically sustainable if the material is actually available in the future product life. Our assumption is that Information Circularity Assistance enables decision makers to incorporate insights from extreme data – high-volume, high-velocity, heterogeneous and distributed data from the product life – into product creation through intelligent Digital Twins. Artificial Intelligence can help to derive sustainable actions in favor of circular products by processing extreme data and enriching it with expert knowledge. The research contributes in three key dimensions. First, a comprehensive literature review is conducted. This review covers concepts of intelligence in Scenario-Technique for strategic product planning, Digital Twin-based analysis of extreme data and relevant technologies from Data Science and Artificial Intelligence. In all areas, the state of the art and emerging trends are identified. Secondly, the study identifies information needs along the steps of the Scenario-Technique and information offerings based on Digital Twins. The concept of Information Circularity Assistance results from the coupling of these demands and offerings, extending the Scenario-Technique beyond traditional expert-based methods. Third, we extend existing Digital Twin methods used in circularity and discuss the deployment of Data Science and Artificial Intelligence algorithms within the product creation process. Our approach uses extreme data to provide a strategic advantage in optimizing product life cycle planning, which is illustrated by two sample applications. The aim is to provide Information Circularity Assistance that will support experienced product planners, developers, and decision makers in the future.},
  author       = {Gräßler, Iris and Weyrich, Michael and Pottebaum, Jens and Kamm, Simon},
  issn         = {0178-2312},
  journal      = {at - Automatisierungstechnik},
  keywords     = {Scenario-Technique, Artificial Intelligence, Digital Twin, Large Language Models},
  number       = {1},
  pages        = {3--21},
  publisher    = {Walter de Gruyter GmbH},
  title        = {{Information Circularity Assistance} based on extreme data},
  doi          = {10.1515/auto-2024-0039},
  volume       = {73},
  year         = {2025},
}

@incollection{58822,
  abstract     = {In 1921, John Wisdom (1904–1993) became a member of Fitzwilliam House, Cambridge, where he read philosophy and attended lectures by G. E. Moore, C. D. Broad, and J. E. McTaggart. He received his BA in 1924, after which he worked for five years at the National Institute of Industrial Psychology. From 1929 to 1934, Wisdom was a Lecturer in the department of logic and metaphysics at the University of St Andrews and a colleague of G. F. Stout. After the publication of his book Interpretation and Analysis (1931) and five articles on “Logical Constructions” in Mind (1931–3), Wisdom became a Lecturer in Moral Sciences in Cambridge and a Fellow of Trinity College. This gave him the opportunity to gain first-hand knowledge of Wittgenstein’s philosophy. Since nothing by Wittgenstein but Tractatus appeared in print for decades, Wisdom’s publications of these years were—mistakenly—read as portents of the new ideas of Wittgenstein himself. The publication of Wittgenstein’s Philosophical Investigations in 1953 brought with it, among other things, the fall of Wisdom’s popularity.},
  author       = {Milkov, Nikolay},
  booktitle    = {Wittgenstein and Other Philosophers: His Influence on Historical and Contemporary Analytic Philosophers, 2 vol., Volume II},
  editor       = {Khani, Ali Hossein and Kemp, Gary},
  keywords     = {elucidation, facts, Frege, language, metaphysics, G. E. Moore, Russell, Stebbing, John Wisdom, Wittgenstein},
  publisher    = {Routledge},
  title        = {{Wisdom}'s {Wittgenstein}},
  year         = {2025},
}

@article{58885,
  abstract     = {There have been several attempts to conceptualize and operationalize pedagogical content knowledge (PCK) in the context of teachers' professional competencies. A recent and popular model is the Refined Consensus Model (RCM), which proposes a framework of dispositional competencies (personal PCK—pPCK) that influence more action-related competencies (enacted PCK—ePCK) and vice versa. However, descriptions of the internal structure of pPCK and possible knowledge domains that might develop independently are still limited, being either primarily theoretically motivated or strictly hierarchical and therefore of limited use, for example, for formative feedback and further development of the RCM. Meanwhile, a non-hierarchical differentiation for the ePCK regarding the plan-teach-reflect cycle has emerged. In this study, we present an exploratory computational approach to investigate pre-service teachers' pPCK for a similar non-hierarchical structure using a large dataset of responses to a pPCK questionnaire (N=846). We drew on theoretical foundations and previous empirical findings to achieve interpretability by integrating this external knowledge into our analyses using the Computational Grounded Theory (CGT) framework. The results of a cluster analysis of the pPCK scores indicate the emergence of prototypical groups, which we refer to as competency profiles: (1) a group with low performance, (2) a group with relatively advanced competency in using pPCK to create instructional elements, (3) a group with relatively advanced competency in using pPCK to assess and analyze described instructional elements, and (4) a group with high performance. These groups show tendencies for certain language usage, which we analyze using a structural topic model in a CGT-inspired pattern refinement step. We verify these patterns by demonstrating the ability of a machine learning model to predict the competency profile assignments. Finally, we discuss some implications of the results for the further development of the RCM and their potential usability for an automated formative assessment.},
  author       = {Zeller, Jannis and Riese, Josef},
  issn         = {1098-2736},
  journal      = {Journal of Research in Science Teaching},
  keywords     = {computational grounded theory, language analysis, machine learning, pedagogical content knowledge, unsupervised learning},
  title        = {Competency Profiles of {PCK} Using Unsupervised Learning: What Implications for the Structures of {pPCK} Emerge From Non-Hierarchical Analyses?},
  doi          = {10.1002/tea.70001},
  year         = {2025},
}

@inproceedings{60958,
  abstract     = {Large Language Models (LLMs) excel in understanding, generating, and processing human language, with growing adoption in process mining. Process mining relies on event logs that capture explicit process knowledge; however, knowledge-intensive processes (KIPs) in domains such as healthcare and product development depend on tacit knowledge, which is often absent from event logs. To bridge this gap, this study proposes a LLM-based framework for mobilizing tacit process knowledge and enriching event logs. A proof-of-concept is demonstrated using a KIP-specific LLM-driven conversational agent built on GPT-4o. The results indicate that LLMs can capture tacit process knowledge through targeted queries and systematically integrate it into event logs. This study presents a novel approach combining LLMs, knowledge management, and process mining, advancing the understanding and management of KIPs by enhancing knowledge accessibility and documentation.},
  author       = {Brennig, Katharina},
  booktitle    = {AMCIS 2025 Proceedings. 11.},
  keywords     = {Process Mining, Large Language Model, Knowledge Management, Knowledge-Intensive Process, Tacit Knowledge},
  location     = {Montréal},
  title        = {Revealing the Unspoken: Using {LLMs} to Mobilize and Enrich Tacit Knowledge in Event Logs of Knowledge-Intensive Processes},
  year         = {2025},
}

@incollection{62701,
  abstract     = {Learning continuous vector representations for knowledge graphs has significantly improved state-of-the-art performances in many challenging tasks. Yet, deep-learning-based models are only post-hoc and locally explainable. In contrast, learning Web Ontology Language (OWL) class expressions in Description Logics (DLs) is ante-hoc and globally explainable. However, state-of-the-art learners have two well-known limitations: scaling to large knowledge graphs and handling missing information. Here, we present a decision-tree-based learner (tDL) to learn Web Ontology Languages (OWLs) class expressions over large knowledge graphs, while imputing missing triples. Given positive and negative example individuals, tDL firstly constructs unique OWL expressions in $\mathcal{SHOIN}$ from concise bounded descriptions of individuals. Each OWL class expression is used as a feature in a binary classification problem to represent input individuals. Thereafter, tDL fits a CART decision tree to learn Boolean decision rules distinguishing positive examples from negative examples. A final OWL expression in $\mathcal{SHOIN}$ is built by traversing the built CART decision tree from the root node to leaf nodes for each positive example. By this, tDL can learn OWL class expressions without exploration, i.e., the number of queries to a knowledge graph is bounded by the number of input individuals. Our empirical results show that tDL outperforms the current state-of-the-art models across datasets. Importantly, our experiments over a large knowledge graph (DBpedia with 1.1 billion triples) show that tDL can effectively learn accurate OWL class expressions, while the state-of-the-art models fail to return any results. Finally, expressions learned by tDL can be seamlessly translated into natural language explanations using a pre-trained large language model and a DL verbalizer.},
  author       = {Demir, Caglar and Yekini, Moshood and Röder, Michael and Mahmood, Yasir and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783032060655},
  issn         = {0302-9743},
  keywords     = {Decision Tree, OWL Class Expression Learning, Description Logic, Knowledge Graph, Large Language Model, Verbalizer},
  location     = {Porto, Portugal},
  publisher    = {Springer Nature Switzerland},
  title        = {Tree-Based {OWL} Class Expression Learner over Large Graphs},
  doi          = {10.1007/978-3-032-06066-2_29},
  year         = {2025},
}

@article{57892,
  abstract     = {The present paper discusses the extent to which Large Language Models (LLMs) may affect the scientific enterprise, reinforcing or mitigating existing structural inequalities expressed by the Matthew Effect and introducing a “bot delusion” in academia. In a theory-led thought experiment, we first focus on the academic publication and citation system and develop three scenarios of the anticipated consequences of using LLMs: reproducing content and status quo (Scenario 1), enabling content coherence evaluation (Scenario 2) and content evaluation (Scenario 3). Second, we discuss the interaction between the use of LLMs and academic (counter)norms for citation selection and their impact on the publication and citation system. Finally, we introduce communal counter-norms to capture academics’ loyal citation behavior and develop three future scenarios that academia may face when LLMs are widely used in the research process, namely status quo future of science, mixed-access future, and open science future.},
  author       = {Wieczorek, Oliver and Steinhardt, Isabel and Schmidt, Rebecca and Mauermeister, Sylvi and Schneijderberg, Christian},
  issn         = {0016-3287},
  journal      = {Futures},
  keywords     = {Large Language Models, Matthew Effect, Academic Publishing and Citation Systems, Scientific Norms, Thought Experiment},
  publisher    = {Elsevier BV},
  title        = {The {Bot Delusion}. Large language models and anticipated consequences for academics’ publication and citation behavior},
  doi          = {10.1016/j.futures.2024.103537},
  volume       = {166},
  year         = {2024},
}

@article{51270,
  abstract     = {This study investigates the teaching methods that future teachers of German as a foreign language use in cultural mediation. Utilizing a qualitative and hermeneutic approach, it analyzes data from a teacher-training module of the International Master's in German as a Foreign/Second Language that Friedrich Schiller University Jena in Germany offers. Participants taught an online course to students from a Japanese university, which focused on cultural learning. Within the teacher training module, the participants discussed their lesson plans, conducted classes, and subsequently reflected on their teaching practices by exploring and critiquing the practical application of their teaching skills. The analysis, which was conducted using qualitative evaluative content analysis following Kuckartz's (2018) approach, revealed a preference for distributive/instructive methods, with some recognition of interactive and collaborative methods. A tendency towards both repetitive and reflective practices was evident, with a predominant focus on content that was specific to a supposed target culture rather than intercultural or transcultural content. The study highlights the need to balance knowledge transmission with the development of critical and reflective skills in cultural mediation. It emphasizes the importance of incorporating collaborative and interactive methods, which promote a critical attitude that is necessary in language teachers and learners. In conclusion, this study advocates adaptive and reflective teaching as an essential component in the training of future language teachers in globalized contexts.},
  author       = {Treder, Alexandra},
  journal      = {Revista Lengua y Cultura},
  keywords     = {cultural mediation, teacher training, foreign language teaching, teaching methods, teaching practice},
  number       = {10},
  pages        = {56--68},
  publisher    = {Universidad Autónoma del Estado de Hidalgo, Mexico},
  title        = {Métodos de la mediación de cultura en el entrenamiento práctico de futuros/as profesores/as de {Alemán} como {Lengua Extranjera}},
  doi          = {10.29057/lc.v5i10.12379},
  volume       = {5},
  year         = {2024},
}

@inproceedings{56983,
  abstract     = {Detecting the veracity of a statement automatically is a challenge the world is grappling with due to the vast amount of data spread across the web. Verifying a given claim typically entails validating it within the framework of supporting evidence like a retrieved piece of text. Classifying the stance of the text with respect to the claim is called stance classification. Despite advancements in automated fact-checking, most systems still rely on a substantial quantity of labeled training data, which can be costly. In this work, we avoid the costly training or fine-tuning of models by reusing pre-trained large language models together with few-shot in-context learning. Since we do not train any model, our approach ExPrompt is lightweight, demands fewer resources than other stance classification methods and can serve as a modern baseline for future developments. At the same time, our evaluation shows that our approach is able to outperform former state-of-the-art stance classification approaches regarding accuracy by at least 2 percent. Our scripts and data used in this paper are available at https://github.com/dice-group/ExPrompt.},
  author       = {Qudus, Umair and Röder, Michael and Vollmers, Daniel and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Proceedings of the 33rd ACM International Conference on Information and Knowledge Management},
  isbn         = {979-8-4007-0436-9},
  keywords     = {Stance Classification, Few-shot in-context learning, Pre-trained large language models},
  location     = {Boise, ID, USA},
  pages        = {3994--3999},
  publisher    = {ACM},
  title        = {{ExPrompt}: Augmenting Prompts Using Examples as Modern Baseline for Stance Classification},
  doi          = {10.1145/3627673.3679923},
  volume       = {9},
  year         = {2024},
}

@article{53801,
  abstract     = {In this study, we evaluate the impact of gender-biased data from German-language physician reviews on the fairness of fine-tuned language models. For two different downstream tasks, we use data reported to be gender biased and aggregate it with annotations. First, we propose a new approach to aspect-based sentiment analysis that allows identifying, extracting, and classifying implicit and explicit aspect phrases and their polarity within a single model. The second task we present is grade prediction, where we predict the overall grade of a review on the basis of the review text. For both tasks, we train numerous transformer models and evaluate their performance. The aggregation of sensitive attributes, such as a physician’s gender and migration background, with individual text reviews allows us to measure the performance of the models with respect to these sensitive groups. These group-wise performance measures act as extrinsic bias measures for our downstream tasks. In addition, we translate several gender-specific templates of the intrinsic bias metrics into the German language and evaluate our fine-tuned models. Based on this set of tasks, fine-tuned models, and intrinsic and extrinsic bias measures, we perform correlation analyses between intrinsic and extrinsic bias measures. In terms of sensitive groups and effect sizes, our bias measure results show different directions. Furthermore, correlations between measures of intrinsic and extrinsic bias can be observed in different directions. This leads us to conclude that gender-biased data does not inherently lead to biased models. Other variables, such as template dependency for intrinsic measures and label distribution in the data, must be taken into account as they strongly influence the metric results.},
  author       = {Kersting, Joschka and Maoro, Falk and Geierhos, Michaela},
  issn         = {0169-023X},
  journal      = {Data \& Knowledge Engineering},
  keywords     = {Language model fairness, Aspect phrase classification, Grade prediction, Physician reviews},
  publisher    = {Elsevier},
  title        = {Towards comparable ratings: Exploring bias in {German} physician reviews},
  doi          = {10.1016/j.datak.2023.102235},
  volume       = {148},
  year         = {2023},
}

@inproceedings{52865,
  abstract     = {This paper addresses new challenges of detecting campaigns in social media, which emerged with the rise of Large Language Models (LLMs). LLMs particularly challenge algorithms focused on the temporal analysis of topical clusters. Simple similarity measures can no longer capture and map campaigns that were previously broadly similar in content. Herein, we analyze whether the classification of messages over time can be profitably used to rediscover poorly detectable campaigns at the content level. Thus, we evaluate classical classifiers and a new method based on siamese neural networks. Our results show that campaigns can be detected despite the limited reliability of the classifiers as long as they are based on a large amount of simultaneously spread artificial content.},
  author       = {Grimme, Britta and Pohl, Janina and Winkelmann, Hendrik and Stampe, Lucas and Grimme, Christian},
  booktitle    = {Disinformation in Open Online Media: 5th Multidisciplinary International Symposium, MISDOOM 2023, Amsterdam, The Netherlands, November 21–22, 2023, Proceedings},
  isbn         = {978-3-031-47895-6},
  keywords     = {Social Media, Campaign Detection, Large Language Models, Siamese Neural Networks},
  pages        = {72--87},
  publisher    = {Springer-Verlag},
  title        = {Lost in Transformation: Rediscovering {LLM}-Generated Campaigns in Social Media},
  doi          = {10.1007/978-3-031-47896-3_6},
  year         = {2023},
}

@inproceedings{27507,
  abstract     = {Accurate real estate appraisal is essential in decision making processes of financial institutions, governments, and trending real estate platforms like Zillow. One of the most important factors of a property’s value is its location. However, creating accurate quantifications of location remains a challenge. While traditional approaches rely on Geographical Information Systems (GIS), recently unstructured data in form of images was incorporated in the appraisal process, but text data remains an untapped reservoir. Our study shows that using text data in form of geolocated Wikipedia articles can increase predictive performance over traditional GIS-based methods by 8.2% in spatial out-of-sample validation. A framework to automatically extract geographically weighted vector representations for text is established and used alongside traditional structural housing features to make predictions and to uncover local patterns on sale price for real estate transactions between 2015 and 2020 in Allegheny County, Pennsylvania.},
  author       = {Heuwinkel, Tim and Kucklick, Jan-Peter and Müller, Oliver},
  booktitle    = {55th Annual Hawaii International Conference on System Sciences (HICSS-55)},
  keywords     = {Real Estate Appraisal, Text Regression, Natural Language Processing (NLP), Location Intelligence, Wikipedia},
  location     = {Virtual},
  title        = {Using Geolocated Text to Quantify Location in Real Estate Appraisal},
  year         = {2022},
}

@inproceedings{31054,
  abstract     = {This paper aims at discussing past limitations set in sentiment analysis research regarding explicit and implicit mentions of opinions. Previous studies have regularly neglected this question in favor of methodical research on standard-datasets. Furthermore, they were limited to linguistically less-diverse domains, such as commercial product reviews. We face this issue by annotating a German-language physician review dataset that contains numerous implicit, long, and complex statements that indicate aspect ratings, such as the physician’s friendliness. We discuss the nature of implicit statements and present various samples to illustrate the challenge described.},
  author       = {Kersting, Joschka and Bäumer, Frederik Simon},
  booktitle    = {Proceedings of the Fourteenth International Conference on Pervasive Patterns and Applications (PATTERNS 2022): Special Track AI-DRSWA: Maturing Artificial Intelligence - Data Science for Real-World Applications},
  editor       = {Kersting, Joschka},
  keywords     = {Sentiment analysis, Natural language processing, Aspect phrase extraction},
  location     = {Barcelona, Spain},
  pages        = {5--9},
  publisher    = {IARIA},
  title        = {Implicit Statements in Healthcare Reviews: A Challenge for Sentiment Analysis},
  year         = {2022},
}

@article{28349,
  abstract     = {Das Auftreten der COVID-19-Pandemie stellt Fremdsprachenkurse vielerorts vor Herausforderungen. Unter Zuhilfenahme diverser digitaler Tools werden nicht nur Lernmaterialien online geteilt, sondern auch die Interaktion zwischen Lehrenden und Lernenden sowie der Lernenden untereinander in den virtuellen Raum verlagert. Qualitative Interviews mit den Beteiligten erfassen, wie diese mit den Herausforderungen videogestützten Sprachunterrichts umgehen und welche Strategien sie wählen, um Sprachenlernen zu ermöglichen. Die Ergebnisse zeigen auf, wo seitens der Kursorganisation und -durchführung Handlungsbedarf besteht.
-----
The rise of the COVID-19 pandemic challenges the teaching and learning of foreign languages at many institutions. The implementation of various digital tools aids not only the online sharing of learning materials, but also shifts teacher-learner and learner-learner interaction to the virtual space. Via qualitative interviews, this study examines how both teachers and learners handle the challenges of language instruction based on videoconferences, and what strategies they employ to enable language learning. The results highlight areas in need of improvement in terms of course organization and facilitation.},
  author       = {Drumm, Sandra and Müller, Mareike and Stenzel, Nadja},
  issn         = {2511-0853},
  journal      = {Informationen Deutsch als Fremdsprache},
  keywords     = {German language courses at university, interaction, digital space, language learning/teaching via videoconference},
  number       = {5},
  pages        = {496--515},
  title        = {Digitale Räume geben und nehmen: Unterrichtsinteraktion in {DSH}-Kursen während der {COVID-19}-Pandemie},
  doi          = {10.1515/infodaf-2021-0069},
  volume       = {48},
  year         = {2021},
}

@inproceedings{26049,
  abstract     = {Content is the new oil. Users consume billions of terabytes a day while surfing on news sites or blogs, posting on social media sites, and sending chat messages around the globe. While content is heterogeneous, the dominant form of web content is text. There are situations where more diversity needs to be introduced into text content, for example, to reuse it on websites or to allow a chatbot to base its models on the information conveyed rather than of the language used. In order to achieve this, paraphrasing techniques have been developed: One example is Text spinning, a technique that automatically paraphrases text while leaving the intent intact. This makes it easier to reuse content, or to change the language generated by the bot more human. One method for modifying texts is a combination of translation and back-translation. This paper presents NATTS, a naive approach that uses transformer-based translation models to create diversified text, combining translation steps in one model. An advantage of this approach is that it can be fine-tuned and handle technical language.},
  author       = {Bäumer, Frederik Simon and Kersting, Joschka and Denisov, Sergej and Geierhos, Michaela},
  booktitle    = {Proceedings of the International Conferences on {WWW/Internet} 2021 and Applied Computing 2021},
  keywords     = {Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing},
  location     = {Lisbon, Portugal},
  pages        = {221--225},
  publisher    = {IADIS},
  title        = {In Other Words: A Naive Approach to Text Spinning},
  year         = {2021},
}

@inproceedings{21727,
  abstract     = {Platform-based business models underlie the success of many of today’s largest, fastest-growing, and most disruptive companies. Despite the success of prominent examples, such as Uber and Airbnb, creating a profitable platform ecosystem presents a key challenge for many companies across all industries. Although research provides knowledge about platforms’ different value drivers (e.g., network effects), companies that seek to transform their current business model into a platform-based one lack an artifact to reduce knowledge boundaries, collaborate effectively, and cope with the complexities and dynamics of platform ecosystems. We address this challenge by developing two artifacts and combining research from variability modeling, business model dependencies, and system dynamics. This paper presents a design science research approach to develop the platform ecosystem modeling language and the platform ecosystem development tool that support researcher and practitioner by visualizing and simulating platform ecosystems.},
  author       = {Vorbohle, Christian and Gottschalk, Sebastian},
  booktitle    = {Proceedings of the 29th European Conference on Information Systems (ECIS)},
  keywords     = {Platform Ecosystems, Platform Ecosystem Modeling Language, Platform Ecosystem Development Tool, Business Models, Design Science},
  location     = {Virtual Conference/Workshop},
  publisher    = {AIS},
  title        = {Towards Visualizing and Simulating Business Models in Dynamic Platform Ecosystems},
  year         = {2021},
}

@article{20212,
  abstract     = {Ideational impact refers to the uptake of a paper's ideas and concepts by subsequent research. It is defined in stark contrast to total citation impact, a measure predominantly used in research evaluation that assumes that all citations are equal. Understanding ideational impact is critical for evaluating research impact and understanding how scientific disciplines build a cumulative tradition. Research has only recently developed automated citation classification techniques to distinguish between different types of citations and generally does not emphasize the conceptual content of the citations and its ideational impact. To address this problem, we develop Deep Content-enriched Ideational Impact Classification (Deep-CENIC) as the first automated approach for ideational impact classification to support researchers' literature search practices. We evaluate Deep-CENIC on 1,256 papers citing 24 information systems review articles from the IT business value domain. We show that Deep-CENIC significantly outperforms state-of-the-art benchmark models. We contribute to information systems research by operationalizing the concept of ideational impact, designing a recommender system for academic papers based on deep learning techniques, and empirically exploring the ideational impact of the IT business value domain.},
  author       = {Prester, Julian and Wagner, Gerit and Schryen, Guido and Hassan, Nik Rushdi},
  journal      = {Decision Support Systems},
  keywords     = {Ideational impact, citation classification, academic recommender systems, natural language processing, deep learning, cumulative tradition},
  month        = jan,
  title        = {Classifying the Ideational Impact of Information Systems Review Articles: A Content-Enriched Deep Learning Approach},
  volume       = {140},
  year         = {2021},
}

@article{31680,
  author       = {Scharlau, Ingrid and Karsten, A. and Rohlfing, Katharina J.},
  issn         = {2030-1006},
  journal      = {Journal of Writing Research},
  keywords     = {Literature and Literary Theory, Linguistics and Language, Language and Linguistics, Education},
  number       = {3},
  pages        = {493--529},
  publisher    = {ARLE (International Association for Research in L1 Education)},
  title        = {Building, emptying out, or dreaming? Action structures and space in students’ metaphors of academic writing},
  doi          = {10.17239/jowr-2021.12.03.01},
  volume       = {12},
  year         = {2021},
}

@inproceedings{18686,
  author       = {Kersting, Joschka and Bäumer, Frederik Simon},
  booktitle    = {Proceedings of the International Conference on Applied Computing 2020},
  keywords     = {Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing},
  location     = {Lisbon, Portugal},
  pages        = {119--123},
  publisher    = {IADIS},
  title        = {Semantic Tagging of Requirement Descriptions: A Transformer-Based Approach},
  year         = {2020},
}

@inproceedings{15580,
  abstract     = {This paper deals with aspect phrase extraction and classification in sentiment analysis. We summarize current approaches and datasets from the domain of aspect-based sentiment analysis. This domain detects sentiments expressed for individual aspects in unstructured text data. So far, mainly commercial user reviews for products or services such as restaurants were investigated. We here present our dataset consisting of German physician reviews, a sensitive and linguistically complex field. Furthermore, we describe the annotation process of a dataset for supervised learning with neural networks. Moreover, we introduce our model for extracting and classifying aspect phrases in one step, which obtains an F1-score of 80%. By applying it to a more complex domain, our approach and results outperform previous approaches.},
  author       = {Kersting, Joschka and Geierhos, Michaela},
  booktitle    = {Proceedings of the 12th International Conference on Agents and Artificial Intelligence (ICAART 2020) -- Special Session on Natural Language Processing in Artificial Intelligence (NLPinAI 2020)},
  keywords     = {Deep Learning, Natural Language Processing, Aspect-based Sentiment Analysis},
  location     = {Valletta, Malta},
  pages        = {391--400},
  publisher    = {SCITEPRESS},
  title        = {Aspect Phrase Extraction in Sentiment Analysis with Deep Learning},
  year         = {2020},
}

@article{46149,
  abstract     = {The paper presents a cross-European survey on teachers and crowdsourcing. The survey examines how familiar language teachers are with the concept of crowdsourcing and addresses their attitude towards including crowdsourcing into language teaching activities. The survey was administrated via an online questionnaire and collected volunteers’ data on: (a) teachers’ experience with organizing crowdsourcing activities for students/pupils, (b) the development of crowdsourced resources and materials as well as (c) teachers’ motivation for participating in or employing crowdsourcing activities. The questionnaire was disseminated in over 30 European countries. The final sample comprises 1129 language teachers aged 20 to 65, mostly working at institutions of tertiary education. The data indicates that many participants are not familiar with the concept of crowdsourcing resulting in a low rate of crowdsourcing activities in the classroom. However, a high percentage of responding teachers is potentially willing to crowdsource teaching materials for the language(s) they teach. They are particularly willing to collaborate with other teachers in the creation of interactive digital learning materials, and to select, edit, and share language examples for exercises or tests. Since the inclusion of crowdsourcing activities in language teaching is still in its initial stage, steps for further research are highlighted.},
  author       = {Arhar Holdt, Špela and Zanasi, Lorenzo and Weber, Tassja and Volodina, Elena and Rodosthenous, Christos and Ordulj, Antonia and Miloshevska, Lina and Lazić Konjik, Ivana and Koeva, Svetla and Kasperavičienė, Ramunė and Hatipoglu, Ciler and Fort, Karën and Bago, Petra and Durán-Muñoz, Isabel and Gajek, Elżbieta and Zviel-Girshin, Rina},
  issn         = {1849-0379},
  journal      = {Rasprave Instituta za hrvatski jezik i jezikoslovlje},
  keywords     = {Linguistics and Language, Language and Linguistics},
  number       = {1},
  pages        = {1--28},
  publisher    = {Institute of Croatian Language and Linguistics},
  title        = {Language Teachers and Crowdsourcing},
  doi          = {10.31724/rihjj.46.1.1},
  volume       = {46},
  year         = {2020},
}

