@inproceedings{50459,
  abstract     = {Organizations employ process mining to discover, check, or enhance process models based on data from information systems to improve business processes. Even though process mining is increasingly relevant in academia and organizations, achieving process mining excellence and generating business value through its application is elusive. Maturity models can help to manage interdisciplinary teams in their efforts to plan, implement, and manage process mining in organizations. However, while numerous maturity models on business process management (BPM) are available, recent calls for process mining maturity models indicate a gap in the current knowledge base. We systematically design and develop a comprehensive process mining maturity model that consists of five factors comprising 23 elements, which organizations need to develop to apply process mining sustainably and successfully. We contribute to the knowledge base by the exaptation of existing BPM maturity models, and validate our model through its application to a real-world scenario.},
  author       = {Brock, Jonathan and Löhr, Bernd and Brennig, Katharina and Seger, Thilo and Bartelheimer, Christian and von Enzberg, Sebastian and Kühn, Arno and Dumitrescu, Roman},
  booktitle    = {European Conference on Information Systems (ECIS)},
  title        = {A Process Mining Maturity Model: Enabling Organizations to Assess and Improve their Process Mining Activities},
  year         = {2023},
}

@article{45112,
  author       = {Beverungen, Daniel and Kundisch, Dennis and Mirbabaie, Milad and Müller, Oliver and Schryen, Guido and Trang, Simon Thanh-Nam and Trier, Matthias},
  journal      = {Business \& Information Systems Engineering},
  number       = {4},
  pages        = {463--474},
  title        = {Digital Responsibility -- A Multilevel Framework for Responsible Digitalization},
  doi          = {10.1007/s12599-023-00822-x},
  volume       = {65},
  year         = {2023},
}

@inproceedings{27506,
  abstract     = {Explainability for machine learning gets more and more important in high-stakes decisions like real estate appraisal. While traditional hedonic house pricing models are fed with hard information based on housing attributes, recently also soft information has been incorporated to increase the predictive performance. This soft information can be extracted from image data by complex models like Convolutional Neural Networks (CNNs). However, these are intransparent which excludes their use for high-stakes financial decisions. To overcome this limitation, we examine if a two-stage modeling approach can provide explainability. We combine visual interpretability by Regression Activation Maps (RAM) for the CNN and a linear regression for the overall prediction. Our experiments are based on 62.000 family homes in Philadelphia and the results indicate that the CNN learns aspects related to vegetation and quality aspects of the house from exterior images, improving the predictive accuracy of real estate appraisal by up to 5.4\%.},
  author       = {Kucklick, Jan-Peter},
  booktitle    = {55th Annual Hawaii International Conference on System Sciences (HICSS-55)},
  keywords     = {Explainable Artificial Intelligence (XAI), Regression Activation Maps, Real Estate Appraisal, Convolutional Block Attention Module, Computer Vision},
  location     = {Virtual},
  title        = {Visual Interpretability of Image-based Real Estate Appraisal},
  year         = {2022},
}

@inproceedings{29539,
  abstract     = {Explainable Artificial Intelligence (XAI) is currently an important topic for the application of Machine Learning (ML) in high-stakes decision scenarios. Related research focuses on evaluating ML algorithms in terms of interpretability. However, providing a human understandable explanation of an intelligent system does not only relate to the used ML algorithm. The data and features used also have a considerable impact on interpretability. In this paper, we develop a taxonomy for describing XAI systems based on aspects about the algorithm and data. The proposed taxonomy gives researchers and practitioners opportunities to describe and evaluate current XAI systems with respect to interpretability and guides the future development of this class of systems.},
  author       = {Kucklick, Jan-Peter},
  booktitle    = {Wirtschaftsinformatik 2022 Proceedings},
  keywords     = {Explainable Artificial Intelligence, XAI, Interpretability, Decision Support Systems, Taxonomy},
  location     = {Nürnberg (online)},
  title        = {Towards a Model- and Data-focused Taxonomy of {XAI} Systems},
  year         = {2022},
}

@article{32866,
  author       = {Shollo, Arisa and Hopf, Konstantin and Thiess, Tiemo and Müller, Oliver},
  issn         = {0963-8687},
  journal      = {The Journal of Strategic Information Systems},
  keywords     = {Information Systems and Management, Information Systems, Management Information Systems},
  number       = {3},
  publisher    = {Elsevier BV},
  title        = {Shifting {ML} Value Creation Mechanisms: A Process Model of {ML} Value Creation},
  doi          = {10.1016/j.jsis.2022.101734},
  volume       = {31},
  year         = {2022},
}

@article{35620,
  abstract     = {Deep learning models fuel many modern decision support systems, because they typically provide high predictive performance. Among other domains, deep learning is used in real-estate appraisal, where it allows to extend the analysis from hard facts only (e.g., size, age) to also consider more implicit information about the location or appearance of houses in the form of image data. However, one downside of deep learning models is their intransparent mechanic of decision making, which leads to a trade-off between accuracy and interpretability. This limits their applicability for tasks where a justification of the decision is necessary. Therefore, in this paper, we first combine different perspectives on interpretability into a multi-dimensional framework for a socio-technical perspective on explainable artificial intelligence. Second, we measure the performance gains of using multi-view deep learning which leverages additional image data (satellite images) for real estate appraisal. Third, we propose and test a novel post-hoc explainability method called Grad-Ram. This modified version of Grad-Cam mitigates the intransparency of convolutional neural networks (CNNs) for predicting continuous outcome variables. With this, we try to reduce the accuracy-interpretability trade-off of multi-view deep learning models. Our proposed network architecture outperforms traditional hedonic regression models by 34\% in terms of MAE. Furthermore, we find that the used satellite images are the second most important predictor after square feet in our model and that the network learns interpretable patterns about the neighborhood structure and density.},
  author       = {Kucklick, Jan-Peter and Müller, Oliver},
  issn         = {2158-656X},
  journal      = {ACM Transactions on Management Information Systems},
  keywords     = {Interpretability, Convolutional Neural Network, Accuracy-Interpretability Trade-Off, Real Estate Appraisal, Hedonic Pricing, Grad-Ram},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Tackling the Accuracy--Interpretability Trade-off: Interpretable Deep Learning Models for Satellite Image-based Real Estate Appraisal},
  doi          = {10.1145/3567430},
  year         = {2022},
}

@inproceedings{36912,
  abstract     = {Existing process mining methods are primarily designed for processes that have reached a high degree of digitalization and standardization. In contrast, the literature has only begun to discuss how process mining can be applied to knowledge-intensive processes—such as product innovation processes—that involve creative activities, require organizational flexibility, depend on single actors’ decision autonomy, and target process-external goals such as customer satisfaction. Due to these differences, existing Process Mining methods cannot be applied out-of-the-box to analyze knowledge-intensive processes. In this paper, we employ Action Design Research (ADR) to design and evaluate a process mining approach for knowledge-intensive processes. More specifically, we draw on the two processes of product innovation and engineer-to-order in manufacturing contexts. We collected data from 27 interviews and conducted 49 workshops to evaluate our IT artifact at different stages in the ADR process. From a theoretical perspective, we contribute five design principles and a conceptual artifact that prescribe how process mining ought to be designed for knowledge-intensive processes in manufacturing. From a managerial perspective, we demonstrate how enacting these principles enables their application in practice.},
  author       = {Löhr, Bernd and Brennig, Katharina and Bartelheimer, Christian and Beverungen, Daniel and Müller, Oliver},
  booktitle    = {International Conference on Business Process Management},
  isbn         = {978-3-031-16103-2},
  title        = {Process Mining of Knowledge-Intensive Processes: An Action Design Research Study in Manufacturing},
  doi          = {10.1007/978-3-031-16103-2_18},
  year         = {2022},
}

@inproceedings{42631,
  abstract     = {In recent years, many cases of deep neural networks failing dramatically when faced with adversarial or real-world examples have been reported. Such failures, which are quite hard to detect, are often related to a generalization problem known as shortcut learning. Yet, with state-of-the-art transformer models now being ubiquitous in financial text mining, one cannot help but wonder how reliable the results conveyed in the ever-growing literature genuinely are. Against this background, we expose, in this work, how vulnerable contemporary financial text mining approaches are to shortcut learning. Focussing on the common learning task of financial sentiment classification, we assess, using two entity-based sampling strategies and our publicly-available dataset, the discrepancies between i.i.d. and o.o.d. performance estimates of four transformer models. Our results reveal that o.o.d. performance estimates are consistently weaker than those of their i.i.d. counterparts, with the error rate increasing by as much as 29.7\%, thus, demonstrating how this issue can, when overlooked, lead to misleading evaluations. Moreover, we show how additional preprocessing steps, such as entity removal and vocabulary filtering, can help reduce the effects of shortcut learning by filtering out entity-related linguistic cues.},
  author       = {Caron, Matthew},
  booktitle    = {2022 IEEE International Conference on Big Data (Big Data)},
  location     = {Osaka, Japan},
  publisher    = {IEEE},
  title        = {Shortcut Learning in Financial Text Mining: Exposing the Overly Optimistic Performance Estimates of Text Classification Models under Distribution Shift},
  doi          = {10.1109/bigdata55660.2022.10020933},
  year         = {2022},
}

@inproceedings{25113,
  abstract     = {Our world is more connected than ever before. Sadly, however, this highly connected world has made it easier to bully, insult, and propagate hate speech on the cyberspace. Even though researchers and companies alike have started investigating this real-world problem, the question remains as to why users are increasingly being exposed to hate and discrimination online. In fact, the noticeable and persistent increase in harmful language on social media platforms indicates that the situation is, actually, only getting worse. Hence, in this work, we show that contemporary ML methods can help tackle this challenge in an accurate and cost-effective manner. Our experiments demonstrate that a universal approach combining transfer learning methods and state-of-the-art Transformer architectures can trigger the efficient development of toxic language detection models. Consequently, with this universal approach, we provide platform providers with a simplistic approach capable of enabling the automated moderation of user-generated content, and as a result, hope to contribute to making the web a safer place.},
  author       = {Caron, Matthew and Bäumer, Frederik S. and Müller, Oliver},
  booktitle    = {55th Hawaii International Conference on System Sciences (HICSS)},
  location     = {Online},
  title        = {Towards Automated Moderation: Enabling Toxic Language Detection with Transfer Learning and Attention-Based Models},
  year         = {2022},
}

@inproceedings{21204,
  author       = {Kucklick, Jan-Peter and Müller, Oliver},
  booktitle    = {The AAAI-21 Workshop on Knowledge Discovery from Unstructured Data in Financial Services},
  title        = {A Comparison of Multi-View Learning Strategies for Satellite Image-based Real Estate Appraisal},
  year         = {2021},
}

@inproceedings{22514,
  author       = {Kucklick, Jan-Peter and Müller, Jennifer and Beverungen, Daniel and Müller, Oliver},
  booktitle    = {European Conference on Information Systems},
  location     = {Virtual},
  title        = {Quantifying the Impact of Location Data for Real Estate Appraisal -- A {GIS}-based Deep Learning Approach},
  year         = {2021},
}

@inbook{32868,
  author       = {Nagbøl, Per Rådberg and Müller, Oliver and Krancher, Oliver},
  booktitle    = {The Next Wave of Sociotechnical Design},
  isbn         = {9783030824044},
  issn         = {0302-9743},
  publisher    = {Springer International Publishing},
  title        = {Designing a Risk Assessment Tool for Artificial Intelligence Systems},
  doi          = {10.1007/978-3-030-82405-1_32},
  year         = {2021},
}

@inproceedings{26812,
  author       = {Leffrang, Dirk and Müller, Oliver},
  booktitle    = {IEEE Workshop on TRust and EXpertise in Visual Analytics},
  title        = {Should I Follow this Model? The Effect of Uncertainty Visualization on the Acceptance of Time Series Forecasts},
  doi          = {10.1109/TREX53765.2021.00009},
  year         = {2021},
}

@inproceedings{24547,
  abstract     = {Over the last years, several approaches for the data-driven estimation of expected possession value (EPV) in basketball and association football (soccer) have been proposed. In this paper, we develop and evaluate PIVOT: the first such framework for team handball. Accounting for the fast-paced, dynamic nature and relative data scarcity of handball, we propose a parsimonious end-to-end deep learning architecture that relies solely on tracking data. This efficient approach is capable of predicting the probability that a team will score within the near future given the fine-grained spatio-temporal distribution of all players and the ball over the last seconds of the game. Our experiments indicate that PIVOT is able to produce accurate and calibrated probability estimates, even when trained on a relatively small dataset. We also showcase two interactive applications of PIVOT for valuing actual and counterfactual player decisions and actions in real-time.},
  author       = {Müller, Oliver and Caron, Matthew and Döring, Michael and Heuwinkel, Tim and Baumeister, Jochen},
  booktitle    = {8th Workshop on Machine Learning and Data Mining for Sports Analytics (ECML PKDD 2021)},
  keywords     = {expected possession value, handball, tracking data, time series classification, deep learning},
  location     = {Online},
  title        = {{PIVOT}: A Parsimonious End-to-End Learning Framework for Valuing Player Actions in Handball using Tracking Data},
  year         = {2021},
}

@inproceedings{25029,
  abstract     = {In early 2021, the finance world was taken by storm by the dramatic price surge of the GameStop Corp. stock. This rise is being, at least in part, attributed to a group of Redditors belonging to the now-famous r/wallstreetbets (WSB) subreddit group. In this work, we set out to address if user activity on the WSB subreddit is associated with the trading volume of the GME stock. Leveraging a unique dataset containing more than 4.9 million WSB posts and comments, we assert that user activity is associated with the trading volume of the GameStop stock. We further show that posts have a significantly higher predictive power than comments and are especially helpful for predicting unusually high trading volume. Lastly, as recent events have shown, we believe that these findings have implications for retail and institutional investors, trading platforms, and policymakers, as these can have disruptive potential.},
  author       = {Caron, Matthew and Gulenko, Maryna and Müller, Oliver},
  booktitle    = {42nd International Conference on Information Systems (ICIS 2021)},
  keywords     = {Retail investors, GameStop, Social Networks, Reddit, WallStreetBets},
  location     = {Austin, Texas},
  title        = {To the Moon! Analyzing the Community of “Degenerates” Engaged in the Surge of the {GME} Stock},
  year         = {2021},
}

@inproceedings{17348,
  author       = {Kucklick, Jan-Peter and Müller, Oliver},
  booktitle    = {Symposium on Statistical Challenges in Electronic Commerce Research (SCECR)},
  title        = {Location, Location, Location: Satellite Image-based Real-estate Appraisal},
  year         = {2020},
}

@inproceedings{17140,
  author       = {Thiess, Tiemo and Müller, Oliver and Tonelli, Lorenzo},
  booktitle    = {International Conference on Wirtschaftsinformatik},
  title        = {Design Principles for Explainable Sales Win-Propensity Prediction Systems},
  doi          = {10.30844/wi_2020_c8-thiess},
  year         = {2020},
}

@inproceedings{17095,
  abstract     = {In order to sustain their competitive advantage, data driven organizations must continue investing in business intelligence and analytics (BI\&A) while mitigating inherent cost increases. Research shows that examining outlays by individual BI\&A artifact (e.g. reports, analytics) is necessary, but introduction in practice is cumbersome and adoption is slow. BI\&A service-oriented cost allocation (BIASOCA) represents an improvement to this situation. This approach enables to render the BI\&A cost pool accountable and improves cost transparency, which leads to a higher BI\&A penetration of economically viable applications in organizations. Against this background, this paper aims at designing and implementing BIASOCA in a medium-sized company. To record organizational impact and increase customer acceptance, this study is carried out as action design research (ADR). Our findings indicate improvements in BI\&A management from working with consumers to locate cost savings and drivers. After invoicing, consumers’ BI\&A awareness increased, releasing resources while also making a better understanding of BIASOCA necessary. We detail how to implement BIASOCA in a real-life setting and the challenges attendant in so doing. Our research contributes to theory and practice with a set of design principles highlighting, besides the accuracy of cost accounting, the importance of collaboration, model comprehensibility and strategic alignment.},
  author       = {Grytz, Raphael and Krohn-Grimberghe, Artus and Müller, Oliver},
  booktitle    = {European Conference on Information Systems},
  title        = {Business Intelligence \& Analytics Cost Accounting: An Action Design Research Approach},
  year         = {2020},
}

@inproceedings{35660,
  abstract     = {Effective customer loyalty programs are essential for every company. Small and medium sized brick-and-mortar stores, such as bakeries, butcher and flower shops, often share a common overarching loyalty program, organized by a third-party provider. Furthermore, these small shops have limited resources and often cannot afford complex BI tools. Out of these reasons we investigated how traditional brick-and-mortar stores can benefit from an expansion of service functionalities of a loyalty card provider. To answer this question, we cooperated with a cross-industry customer loyalty program in a polycentric region. The loyalty program was transformed from simple card-based solution to a mobile app for customers and a web-application for shop owners. The new solution offers additional BI services for performing data analytics and strengthening the position of brick-and-mortar stores. Participating shops can work together in order to increase sales and align marketing campaigns. Therefore, shopping data from 12 years, 55 shops, and 19,000 customers was analyzed.},
  author       = {Kucklick, Jan-Peter and Kamm, Michael Reiner and Schneider, Johannes and vom Brocke, Jan},
  booktitle    = {Proceedings of the 53rd Hawaii International Conference on System Sciences},
  keywords     = {brick-and-mortar stores, business intelligence, case study, loyalty program},
  title        = {Extending Loyalty Programs with {BI} Functionalities: A Case Study for Brick-and-Mortar Stores},
  year         = {2020},
}

@article{35662,
  abstract     = {While the analysis and usage of data are increasing in importance, the application of sophisticated BI solutions in small stores is limited by available technical capabilities and financial resources. This study investigates how brick-and-mortar stores can benefit from an expansion of service functionalities of a cross-industry loyalty card provider. Digitalizing the loyalty program created new opportunities, while the analysis of shopping data of 13 years, 19,000 customers, and 55 shops empowered data-based decision support.},
  author       = {Kamm, Michael Reiner and Kucklick, Jan-Peter and Schneider, Johannes and vom Brocke, Jan},
  issn         = {1058-0530},
  journal      = {Information Systems Management},
  keywords     = {Customer loyalty, case study, brick-and-mortar stores, business intelligence, loyalty programs},
  number       = {4},
  pages        = {270--286},
  publisher    = {Informa UK Limited},
  title        = {Data Mining for Small Shops: Empowering Brick-and-Mortar Stores through {BI} Functionalities of a Loyalty Program},
  doi          = {10.1080/10580530.2020.1855486},
  volume       = {38},
  year         = {2020},
}

