@article{45112,
  author       = {Beverungen, Daniel and Kundisch, Dennis and Mirbabaie, Milad and M{\"u}ller, Oliver and Schryen, Guido and Trang, Simon Thanh-Nam and Trier, Matthias},
  journal      = {Business \& Information Systems Engineering},
  number       = {4},
  pages        = {463--474},
  title        = {Digital Responsibility -- a Multilevel Framework for Responsible Digitalization},
  doi          = {10.1007/s12599-023-00822-x},
  volume       = {65},
  year         = {2023},
}

@article{23415,
  author       = {Sperling, Martina and Schryen, Guido},
  journal      = {European Journal of Operational Research},
  number       = {2},
  pages        = {690--705},
  title        = {Decision Support for Disaster Relief: Coordinating Spontaneous Volunteers},
  volume       = {299},
  year         = {2022},
}

@article{20212,
  abstract     = {Ideational impact refers to the uptake of a paper's ideas and concepts by subsequent research. It is defined in stark contrast to total citation impact, a measure predominantly used in research evaluation that assumes that all citations are equal. Understanding ideational impact is critical for evaluating research impact and understanding how scientific disciplines build a cumulative tradition. Research has only recently developed automated citation classification techniques to distinguish between different types of citations and generally does not emphasize the conceptual content of the citations and its ideational impact. To address this problem, we develop Deep Content-enriched Ideational Impact Classification (Deep-CENIC) as the first automated approach for ideational impact classification to support researchers' literature search practices. We evaluate Deep-CENIC on 1,256 papers citing 24 information systems review articles from the IT business value domain. We show that Deep-CENIC significantly outperforms state-of-the-art benchmark models. We contribute to information systems research by operationalizing the concept of ideational impact, designing a recommender system for academic papers based on deep learning techniques, and empirically exploring the ideational impact of the IT business value domain.},
  author       = {Prester, Julian and Wagner, Gerit and Schryen, Guido and Hassan, Nik Rushdi},
  journal      = {Decision Support Systems},
  keywords     = {Ideational impact, citation classification, academic recommender systems, natural language processing, deep learning, cumulative tradition},
  month        = jan,
  title        = {Classifying the Ideational Impact of Information Systems Review Articles: A Content-Enriched Deep Learning Approach},
  volume       = {140},
  year         = {2021},
}

@article{20844,
  abstract     = {Review papers are essential for knowledge development in IS. While some are cited twice a day, others accumulate single digit citations over a decade. The magnitude of these differences prompts us to analyze what distinguishes those reviews that have proven to be integral to scientific progress from those that might be considered less impactful. Our results highlight differences between reviews aimed at describing, understanding, explaining, and theory testing. Beyond the control variables, they demonstrate the importance of methodological transparency and the development of research agendas. These insights inform all stakeholders involved in the development and publication of review papers.},
  author       = {Wagner, Gerit and Prester, Julian and Roche, Maria and Schryen, Guido and Benlian, Alexander and Par{\'e}, Guy and Templier, Mathieu},
  journal      = {Information \& Management},
  keywords     = {Literature review, review papers, scientometric, scientific impact, citation analysis},
  number       = {3},
  title        = {Which Factors Affect the Scientific Impact of Review Papers in {IS} Research? A Scientometric Study},
  volume       = {58},
  year         = {2021},
}

@article{23494,
  author       = {Stumpe, Miriam and R{\"o}{\ss}ler, David and Schryen, Guido and Kliewer, Natalia},
  journal      = {EURO Journal on Transportation and Logistics},
  title        = {Study on Sensitivity of Electric Bus Systems under Simultaneous Optimization of Charging Infrastructure and Vehicle Schedules},
  doi          = {10.1016/j.ejtl.2021.100049},
  volume       = {10},
  year         = {2021},
}

@article{17934,
  author       = {Wagner, Gerit and Prester, Julian and Schryen, Guido},
  journal      = {Communications of the Association for Information Systems},
  number       = {1},
  title        = {Exploring the Scientific Impact of Information Systems Design Science Research},
  volume       = {48},
  year         = {2021},
}

@techreport{17019,
  abstract     = {The scientific impact of research papers is multi-dimensional and can be determined quantitatively by means of citation analysis and qualitatively by means of content analysis. Accounting for the widely acknowledged limitations of pure citation analysis, we adopt a knowledge-based perspective on scientific impact to develop a methodology for content-based citation analysis which allows determining how papers have enabled knowledge development in subsequent research (knowledge impact). As knowledge development differs between research genres, we develop a new knowledge-based citation analysis methodology for the genre of standalone literature reviews (LRs). We apply the suggested methodology to the IS business value domain by manually coding 22 LRs and 1,228 citing papers (CPs) and show that the results challenge the assumption that citations indicate knowledge impact. We derive implications for distinguishing knowledge impact from citation impact in the LR genre. Finally, we develop recommendations for authors of LRs, scientific evaluation committees and editorial boards of journals how to apply and benefit from the suggested methodology, and we discuss its efficiency and automatization.},
  author       = {Schryen, Guido and Wagner, Gerit and Benlian, Alexander},
  keywords     = {Scientific impact, knowledge impact, content-based citation analysis, methodology},
  title        = {Distinguishing Knowledge Impact from Citation Impact: A Methodology for Analysing Knowledge Impact for the Literature Review Genre},
  year         = {2020},
  internal-note = {NOTE(review): @techreport requires an institution field -- confirm the issuing institution and add it},
}

@inproceedings{17055,
  abstract     = {Understanding a new literature corpus can be a grueling experience for junior scholars. Nevertheless, corresponding guidelines have not been updated for decades. We contend that the traditional strategy of skimming all papers and reading selected papers afterwards needs to be revised. Therefore, we design a new strategy that guides the overall exploratory process by prioritizing influential papers for initial reading, followed by skimming the remaining papers. Consistent with schemata theory, starting with in-depth reading allows readers to acquire more substantial prior content schemata, which are representative for the literature corpus and useful in the following skimming process. To this end, we develop a prototype that identifies the influential papers from a set of PDFs, which is illustrated in a case study in the IT business value domain. With the new strategy, we envision a more efficient process of exploring unknown literature corpora.},
  author       = {Wagner, Gerit and Empl, Philipp and Schryen, Guido},
  booktitle    = {28th European Conference on Information Systems ({ECIS} 2020)},
  keywords     = {Reading and skimming, Exploring literature, Review methodology, Design science research, Schemata theory},
  location     = {Marrakesh, Morocco},
  title        = {Designing a Novel Strategy for Exploring Literature Corpora},
  year         = {2020},
}

@article{15414,
  author       = {Schryen, Guido},
  journal      = {Communications of the ACM},
  number       = {9},
  pages        = {35--37},
  title        = {Integrating Management Science into the {HPC} Research Ecosystem},
  volume       = {63},
  year         = {2020},
}

@article{15513,
  abstract     = {This interview is part of the special issue (01/2020) on ``High Performance Business Computing'' to be published in the journal Business \& Information Systems Engineering. The interviewee Utz-Uwe Haus is Senior Research Engineer at the CRAY European Research Lab (CERL). A bio of him is included at the end of the interview.},
  author       = {Schryen, Guido and Kliewer, Natalia and Fink, Andreas},
  journal      = {Business \& Information Systems Engineering},
  number       = {1},
  pages        = {21--23},
  title        = {Interview with {Utz-Uwe Haus} on ``High Performance Computing in Economic Environments: Opportunities and Challenges''},
  volume       = {62},
  year         = {2020},
}

@article{15022,
  author       = {Schryen, Guido},
  journal      = {European Journal of Operational Research},
  number       = {1},
  pages        = {1--18},
  publisher    = {Elsevier},
  title        = {Parallel computational optimization in operations research: A new integrative framework, literature review and research directions},
  volume       = {287},
  year         = {2020},
}

@article{16249,
  abstract     = {Timing plays a crucial role in the context of information security investments. We regard timing in two dimensions, namely the time of announcement in relation to the time of investment and the time of announcement in relation to the time of a fundamental security incident. The financial value of information security investments is assessed by examining the relationship between the investment announcements and their stock market reaction focusing on the two time dimensions. Using an event study methodology, we found that both dimensions influence the stock market return of the investing organization. Our results indicate that (1) after fundamental security incidents in a given industry, the stock price will react more positively to a firm's announcement of actual information security investments than to announcements of the intention to invest; (2) the stock price will react more positively to a firm's announcements of the intention to invest after the fundamental security incident compared to before; and (3) the stock price will react more positively to a firm's announcements of actual information security investments after the fundamental security incident compared to before. Overall, the lowest abnormal return can be expected when the intention to invest is announced before a fundamental information security incident and the highest return when actual investing after a fundamental information security incident in the respective industry.},
  author       = {Szubartowicz, Eva and Schryen, Guido},
  journal      = {Journal of Information System Security},
  keywords     = {Event Study, Information Security, Investment Announcements, Stock Price Reaction, Value of Information Security Investments},
  number       = {1},
  pages        = {3--31},
  publisher    = {Information Institute Publishing, Washington DC, USA},
  title        = {Timing in Information Security: An Event Study on the Impact of Information Security Investment Announcements},
  volume       = {16},
  year         = {2020},
}

@article{11946,
  abstract     = {Literature reviews (LRs) play an important role in the development of domain knowledge in all fields. Yet, we observe a lack of insights into the activities with which LRs actually develop knowledge. To address this important gap, we (1) derive knowledge building activities from the extant literature on LRs, (2) suggest a knowledge-based typology of LRs that complements existing typologies, and (3) apply the suggested typology in an empirical study that explores how LRs with different goals and methodologies have contributed to knowledge development. The analysis of 240 LRs published in 40 renowned IS journals between 2000 and 2014 allows us to draw a detailed picture of knowledge development achieved by one of the most important genres in the IS field. An overarching contribution of our work is to unify extant conceptualizations of LRs by clarifying and illustrating how LRs apply different methodologies in a range of knowledge building activities to achieve their goals with respect to theory.},
  author       = {Schryen, Guido and Wagner, Gerit and Benlian, Alexander and Par{\'e}, Guy},
  issn         = {1529-3181},
  journal      = {Communications of the Association for Information Systems},
  keywords     = {Literature review, knowledge development, knowledge building activities, knowledge-based typology, information systems research},
  pages        = {134--186},
  title        = {A Knowledge Development Perspective on Literature Reviews: Validation of a New Typology in the {IS} Field},
  doi          = {10.17705/1CAIS.04607},
  volume       = {46},
  year         = {2020},
}

@article{14985,
  author       = {Schryen, Guido and Kliewer, Natalia and Fink, Andreas},
  journal      = {Business \& Information Systems Engineering},
  number       = {1},
  pages        = {1--3},
  title        = {High Performance Business Computing},
  doi          = {10.1007/s12599-019-00622-2},
  volume       = {62},
  year         = {2020},
}

@article{13175,
  abstract     = {Today, organizations must deal with a plethora of IT security threats and to ensure smooth and uninterrupted business operations, firms are challenged to predict the volume of IT security vulnerabilities and allocate resources for fixing them. This challenge requires decision makers to assess which system or software packages are prone to vulnerabilities, how many post-release vulnerabilities can be expected to occur during a certain period of time, and what impact exploits might have. Substantial research has been dedicated to techniques that analyze source code and detect security vulnerabilities. However, only limited research has focused on forecasting security vulnerabilities that are detected and reported after the release of software. To address this shortcoming, we apply established methodologies which are capable of forecasting events exhibiting specific time series characteristics of security vulnerabilities, i.e., rareness of occurrence, volatility, non-stationarity, and seasonality. Based on a dataset taken from the National Vulnerability Database (NVD), we use the Mean Absolute Error (MAE) and Root Mean Square Error (RMSE) to measure the forecasting accuracy of single, double, and triple exponential smoothing methodologies, Croston's methodology, ARIMA, and a neural network-based approach. We analyze the impact of the applied forecasting methodology on the prediction accuracy with regard to its robustness along the dimensions of the examined system and software package "operating systems", "browsers" and "office solutions" and the applied metrics. To the best of our knowledge, this study is the first to analyze the effect of forecasting methodologies and to apply metrics that are suitable in this context. Our results show that the optimal forecasting methodology depends on the software or system package, as some methodologies perform poorly in the context of IT security vulnerabilities, that absolute metrics can cover the actual prediction error precisely, and that the prediction accuracy is robust within the two applied forecasting-error metrics.},
  author       = {Yasasin, Emrah and Prester, Julian and Wagner, Gerit and Schryen, Guido},
  issn         = {0167-4048},
  journal      = {Computers \& Security},
  month        = jan,
  title        = {Forecasting {IT} Security Vulnerabilities -- An Empirical Analysis},
  volume       = {88},
  year         = {2020},
}

@article{5674,
  abstract     = {In disaster operations management, a challenging task for rescue organizations occurs when they have to assign and schedule their rescue units to emerging incidents under time pressure in order to reduce the overall resulting harm. Of particular importance in practical scenarios is the need to consider collaboration of rescue units. This task has hardly been addressed in the literature. We contribute to both modeling and solving this problem by (1) conceptualizing the situation as a type of scheduling problem, (2) modeling it as a binary linear minimization problem, (3) suggesting a branch-and-price algorithm, which can serve as both an exact and heuristic solution procedure, and (4) conducting computational experiments - including a sensitivity analysis of the effects of exogenous model parameters on execution times and objective value improvements over a heuristic suggested in the literature - for different practical disaster scenarios. The results of our computational experiments show that most problem instances of practically feasible size can be solved to optimality within ten minutes. Furthermore, even when our algorithm is terminated once the first feasible solution has been found, this solution is in almost all cases competitive to the optimal solution and substantially better than the solution obtained by the best known algorithm from the literature. This performance of our branch-and-price algorithm enables rescue organizations to apply our procedure in practice, even when the time for decision making is limited to a few minutes. By addressing a very general type of scheduling problem, our approach applies to various scheduling situations.},
  author       = {Rauchecker, Gerhard and Schryen, Guido},
  journal      = {European Journal of Operational Research},
  keywords     = {OR in disaster relief, disaster operations management, scheduling, branch-and-price},
  number       = {1},
  pages        = {352--363},
  publisher    = {Elsevier},
  title        = {An Exact Branch-and-Price Algorithm for Scheduling Rescue Units during Disaster Response},
  volume       = {272},
  year         = {2019},
}

@article{6512,
  abstract     = {Scheduling problems are essential for decision making in many academic disciplines, including operations management, computer science, and information systems. Since many scheduling problems are NP-hard in the strong sense, there is only limited research on exact algorithms and how their efficiency scales when implemented on parallel computing architectures. We address this gap by (1) adapting an exact branch-and-price algorithm to a parallel machine scheduling problem on unrelated machines with sequence- and machine-dependent setup times, (2) parallelizing the adapted algorithm by implementing a distributed-memory parallelization with a master/worker approach, and (3) conducting extensive computational experiments using up to 960 MPI processes on a modern high performance computing cluster. With our experiments, we show that the efficiency of our parallelization approach can lead to superlinear speedup but can vary substantially between instances. We further show that the wall time of serial execution can be substantially reduced through our parallelization, in some cases from 94 hours to less than six minutes when our algorithm is executed on 960 processes.},
  author       = {Rauchecker, Gerhard and Schryen, Guido},
  journal      = {Computers \& Operations Research},
  keywords     = {parallel machine scheduling with setup times, parallel branch-and-price algorithm, high performance computing, master/worker parallelization},
  pages        = {338--357},
  publisher    = {Elsevier},
  title        = {Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm},
  volume       = {104},
  year         = {2019},
}

@inproceedings{6514,
  abstract     = {Recommender Agents (RAs) facilitate consumers' online purchase decisions for complex, multi-attribute products. As not all combinations of attribute levels can be obtained, users are forced into trade-offs. The exposure of trade-offs in a RA has been found to affect consumers' perceptions. However, little is known about how different preference elicitation methods in RAs affect consumers by varying degrees of trade-off exposure. We propose a research model that investigates how different levels of trade-off exposure cognitively and affectively influence consumers' satisfaction with RAs. We operationalize these levels in three different RA types and test our hypotheses in a laboratory experiment with 116 participants. Our results indicate that with increasing trade-off exposure, perceived enjoyment and perceived control follow an inverted U-shaped relationship. Hence, RAs using preference elicitation methods with medium trade-off exposure yield highest consumer satisfaction. This contributes to the understanding of trade-offs in RAs and provides valuable implications to e-commerce practitioners.},
  author       = {Schuhbeck, Veronika and Siegfried, Nils and Dorner, Verena and Benlian, Alexander and Scholz, Michael and Schryen, Guido},
  booktitle    = {Proceedings of the 14. Internationale Tagung Wirtschaftsinformatik},
  keywords     = {Recommender Agents, Preference Elicitation Method, Trade-off Exposure, Customer Satisfaction},
  location     = {Siegen, Germany},
  pages        = {55--64},
  title        = {Walking the Middle Path: How Medium Trade-off Exposure Leads to Higher Consumer Satisfaction in Recommender Agents},
  year         = {2019},
}

@article{14540,
  author       = {Schryen, Guido and Kliewer, Natalia and Bornd{\"o}rfer, Ralf and Koch, Thorsten},
  journal      = {OR News},
  pages        = {34--35},
  title        = {High-Performance Business Computing -- Parallel Algorithms and Implementations for Solving Problems in Operations Research and Data Analysis},
  volume       = {65},
  year         = {2019},
}

@inproceedings{5675,
  abstract     = {When responding to natural disasters, professional relief units are often supported by many volunteers which are not affiliated to humanitarian organizations. The effective coordination of these volunteers is crucial to leverage their capabilities and to avoid conflicts with professional relief units. In this paper, we empirically identify key requirements that professional relief units pose on this coordination. Based on these requirements, we suggest a decision model. We computationally solve a real-world instance of the model and empirically validate the computed solution in interviews with practitioners. Our results show that the suggested model allows for solving volunteer coordination tasks of realistic size near-optimally within short time, with the determined solution being well accepted by practitioners. We also describe in this article how the suggested decision support model is integrated in the volunteer coordination system which we develop in joint cooperation with a disaster management authority and a software development company.},
  author       = {Rauchecker, Gerhard and Schryen, Guido},
  booktitle    = {Proceedings of the 15th International Conference on Information Systems for Crisis Response and Management},
  keywords     = {Coordination of spontaneous volunteers, volunteer coordination system, decision support, scheduling optimization model, linear programming},
  location     = {Rochester, NY, USA},
  title        = {Decision Support for the Optimal Coordination of Spontaneous Volunteers in Disaster Relief},
  year         = {2018},
}

