@techreport{47107,
  author      = {Beverungen, Daniel and zur Heiden, Philipp and Lehrer, Christiane and Trier, Matthias and Bartelheimer, Christian and Bradt, Tobias and Distel, Bettina and Drews, Paul and Ehmke, Jan Fabian and Fill, Hans-Georg and Flath, Christoph M. and Fridgen, Gilbert and Grisold, Thomas and Janiesch, Christian and Janson, Andreas and Krancher, Oliver and Krönung, Julia and Kundisch, Dennis and Márton, Attila and Mirbabaie, Milad and Morana, Stefan and Mueller, Benjamin and Müller, Oliver and Oberländer, Anna Maria and Peters, Christoph and Peukert, Christoph and Reuter-Oppermann, Melanie and Riehle, Dennis M. and Robra-Bissantz, Susanne and Röglinger, Maximilian and Rosenthal, Kristina and Schryen, Guido and Schütte, Reinhard and Strahringer, Susanne and Urbach, Nils and Wessel, Lauri and Zavolokina, Liudmila and Zschech, Patrick},
  pages       = {16},
  institution = {Department of Information Systems, Paderborn University},
  title       = {{Implementing Digital Responsibility through Information Systems Research: A Delphi Study of Objectives, Activities, and Challenges in IS Research}},
  year        = {2023},
}

@article{45112,
  author  = {Beverungen, Daniel and Kundisch, Dennis and Mirbabaie, Milad and Müller, Oliver and Schryen, Guido and Trang, Simon Thanh-Nam and Trier, Matthias},
  journal = {Business \& Information Systems Engineering},
  number  = {4},
  pages   = {463--474},
  title   = {{Digital Responsibility – a Multilevel Framework for Responsible Digitalization}},
  doi     = {10.1007/s12599-023-00822-x},
  volume  = {65},
  year    = {2023},
}

@inproceedings{37312,
  abstract  = {Optimal decision making requires appropriate evaluation of advice. Recent literature reports that algorithm aversion reduces the effectiveness of predictive algorithms. However, it remains unclear how people recover from bad advice given by an otherwise good advisor. Previous work has focused on algorithm aversion at a single time point. We extend this work by examining successive decisions in a time series forecasting task using an online between-subjects experiment (N = 87). Our empirical results do not confirm algorithm aversion immediately after bad advice. The estimated effect suggests an increasing algorithm appreciation over time. Our work extends the current knowledge on algorithm aversion with insights into how weight on advice is adjusted over consecutive tasks. Since most forecasting tasks are not one-off decisions, this also has implications for practitioners.},
  author    = {Leffrang, Dirk and Bösch, Kevin and Müller, Oliver},
  booktitle = {Hawaii International Conference on System Sciences},
  keywords  = {Algorithm aversion, Time series, Decision making, Advice taking, Forecasting},
  title     = {{Do People Recover from Algorithm Aversion? An Experimental Study of Algorithm Aversion over Time}},
  year      = {2023},
}

@inproceedings{50431,
  abstract  = {Recommender systems now span the entire customer journey. Amid the multitude of diversified experiences, immersing in cultural events has become a key aspect of tourism. Cultural events, however, suffer from fleeting lifecycles, evade exact replication, and invariably lie in the future. In addition, their low standardization makes harnessing historical data regarding event content or past patron evaluations intricate. The distinctive traits of events thereby compound the challenge of the cold-start dilemma in event recommenders. Content-based recommendations stand as a viable avenue to alleviate this issue, functioning even in scenarios where item-user information is scarce. Still, the effectiveness of content-based recommendations often hinges on the quality of the data representation they build upon. In this study, we explore an array of cutting-edge uni- and multimodal vision and language foundation models (VL-FMs) for this purpose. Next, we derive content-based recommendations through a straightforward clustering approach that groups akin events together, and evaluate the efficacy of the models through a series of online user experiments across three dimensions: similarity-based evaluation, comparison-based evaluation, and clustering assignment evaluation. Our experiments generated four major findings. First, we found that all VL-FMs consistently outperformed a naive baseline of recommending randomly drawn events. Second, unimodal text-based embeddings were surprisingly on par or in some cases even superior to multimodal embeddings. Third, multimodal embeddings yielded arguably more fine-grained and diverse clusters in comparison to their unimodal counterparts. Finally, we could confirm that cross event interest is indeed reliant on the perceived similarity of events, resonating with the notion of similarity in content-based recommendations. All in all, we believe that leveraging the potential of contemporary FMs for content-based event recommendations would help address the cold-start problem and propel this field of research forward in new and exciting ways.},
  author    = {Halimeh, Haya and Freese, Florian and Müller, Oliver},
  booktitle = {Workshop on Recommenders in Tourism, co-located with the 17th ACM Conference on Recommender Systems},
  title     = {{Event Recommendations through the Lens of Vision and Language Foundation Models}},
  year      = {2023},
}

@inproceedings{45270,
  abstract  = {Clinical depression is a serious mental disorder that poses challenges for both personal and public health. Millions of people struggle with depression each year, but for many, the disorder goes undiagnosed or untreated. Over the last decade, early depression detection on social media emerged as an interdisciplinary research field. However, there is still a gap in detecting hesitant, depression-susceptible individuals with minimal direct depressive signals at an early stage. We, therefore, take up this open point and leverage posts from Reddit to fill the addressed gap. Our results demonstrate the potential of contemporary Transformer architectures in yielding promising predictive capabilities for mental health research. Furthermore, we investigate the model’s interpretability using a surrogate and a topic modeling approach. Based on our findings, we consider this work as a further step towards developing a better understanding of mental eHealth and hope that our results can support the development of future technologies.},
  author    = {Halimeh, Haya and Caron, Matthew and Müller, Oliver},
  booktitle = {Hawaii International Conference on System Sciences},
  keywords  = {Social Media and Healthcare Technology, early depression detection, liwc, mental health, transfer learning, transformer architectures},
  title     = {{Early Depression Detection with Transformer Models: Analyzing the Relationship between Linguistic and Psychology-Based Features}},
  year      = {2023},
}

@inproceedings{37058,
  abstract  = {Digital technologies have made the line of visibility more transparent, enabling customers to get deeper insights into an organization’s core operations than ever before. This creates new challenges for organizations trying to consistently deliver high-quality customer experiences. In this paper we conduct an empirical analysis of customers’ preferences and their willingness-to-pay for different degrees of process transparency, using the example of digitally-enabled business-to-customer delivery services. Applying conjoint analysis, we quantify customers’ preferences and willingness-to-pay for different service attributes and levels. Our contributions are two-fold: For research, we provide empirical measurements of customers’ preferences and their willingness-to-pay for process transparency, suggesting that more is not always better. Additionally, we provide a blueprint of how conjoint analysis can be applied to study design decisions regarding changing an organization’s digital line of visibility. For practice, our findings enable service managers to make decisions about process transparency and establishing different levels of service quality.},
  author    = {Brennig, Katharina and Müller, Oliver},
  booktitle = {Hawaii International Conference on System Sciences},
  keywords  = {Digital Services, Line of Visibility, Process Transparency, Customer Preferences, Conjoint Analysis},
  location  = {Lāhainā},
  title     = {{More Isn’t Always Better – Measuring Customers’ Preferences for Digital Process Transparency}},
  year      = {2023},
}

@inbook{50450,
  author    = {Brennig, Katharina and Benkert, Kay and Löhr, Bernd and Müller, Oliver},
  booktitle = {Business Process Management Workshops},
  isbn      = {9783031509735},
  issn      = {1865-1348},
  title     = {{Text-Aware Predictive Process Monitoring of Knowledge-Intensive Processes: Does Control Flow Matter?}},
  doi       = {10.1007/978-3-031-50974-2_33},
  year      = {2023},
}

@inproceedings{27507,
  abstract  = {Accurate real estate appraisal is essential in decision making processes of financial institutions, governments, and trending real estate platforms like Zillow. One of the most important factors of a property’s value is its location. However, creating accurate quantifications of location remains a challenge. While traditional approaches rely on Geographical Information Systems (GIS), recently unstructured data in form of images was incorporated in the appraisal process, but text data remains an untapped reservoir. Our study shows that using text data in form of geolocated Wikipedia articles can increase predictive performance over traditional GIS-based methods by 8.2\% in spatial out-of-sample validation. A framework to automatically extract geographically weighted vector representations for text is established and used alongside traditional structural housing features to make predictions and to uncover local patterns on sale price for real estate transactions between 2015 and 2020 in Allegheny County, Pennsylvania.},
  author    = {Heuwinkel, Tim and Kucklick, Jan-Peter and Müller, Oliver},
  booktitle = {55th Annual Hawaii International Conference on System Sciences (HICSS-55)},
  keywords  = {Real Estate Appraisal, Text Regression, Natural Language Processing (NLP), Location Intelligence, Wikipedia},
  location  = {Virtual},
  title     = {{Using Geolocated Text to Quantify Location in Real Estate Appraisal}},
  year      = {2022},
}

@article{35620,
  abstract  = {Deep learning models fuel many modern decision support systems, because they typically provide high predictive performance. Among other domains, deep learning is used in real-estate appraisal, where it allows to extend the analysis from hard facts only (e.g., size, age) to also consider more implicit information about the location or appearance of houses in the form of image data. However, one downside of deep learning models is their intransparent mechanic of decision making, which leads to a trade-off between accuracy and interpretability. This limits their applicability for tasks where a justification of the decision is necessary. Therefore, in this paper, we first combine different perspectives on interpretability into a multi-dimensional framework for a socio-technical perspective on explainable artificial intelligence. Second, we measure the performance gains of using multi-view deep learning which leverages additional image data (satellite images) for real estate appraisal. Third, we propose and test a novel post-hoc explainability method called Grad-Ram. This modified version of Grad-Cam mitigates the intransparency of convolutional neural networks (CNNs) for predicting continuous outcome variables. With this, we try to reduce the accuracy-interpretability trade-off of multi-view deep learning models. Our proposed network architecture outperforms traditional hedonic regression models by 34\% in terms of MAE. Furthermore, we find that the used satellite images are the second most important predictor after square feet in our model and that the network learns interpretable patterns about the neighborhood structure and density.},
  author    = {Kucklick, Jan-Peter and Müller, Oliver},
  issn      = {2158-656X},
  journal   = {ACM Transactions on Management Information Systems},
  keywords  = {Interpretability, Convolutional Neural Network, Accuracy-Interpretability Trade-Off, Real Estate Appraisal, Hedonic Pricing, Grad-Ram},
  publisher = {Association for Computing Machinery (ACM)},
  title     = {{Tackling the Accuracy–Interpretability Trade-off: Interpretable Deep Learning Models for Satellite Image-based Real Estate Appraisal}},
  doi       = {10.1145/3567430},
  year      = {2022},
}

@inproceedings{41486,
  abstract  = {Now accounting for more than 80\% of a firm's worth, brands have become essential assets for modern organizations. However, methods and techniques for the monetary valuation of brands are still under-researched. Hence, the objective of this study is to evaluate the utility of explanatory statistical models and machine learning approaches for explaining and predicting brand value. Drawing upon the case of the most valuable English football brands during the 2016/17 to 2020/21 seasons, we demonstrate how to operationalize Aaker's (1991) theoretical brand equity framework to collect meaningful qualitative and quantitative feature sets. Our explanatory models can explain up to 77\% of the variation in brand valuations across all clubs and seasons, while our predictive approach can predict out-of-sample observations with a mean absolute percentage error (MAPE) of 14\%. Future research can build upon our results to develop domain-specific brand valuation methods while enabling managers to make better-informed investment decisions.},
  author    = {Caron, Matthew and Bartelheimer, Christian and Müller, Oliver},
  booktitle = {Proceedings of the 28th Americas Conference on Information Systems (AMCIS)},
  location  = {Minneapolis, USA},
  title     = {{Towards a Reliable \& Transparent Approach to Data-Driven Brand Valuation}},
  year      = {2022},
}

@inproceedings{36912,
  abstract  = {Existing process mining methods are primarily designed for processes that have reached a high degree of digitalization and standardization. In contrast, the literature has only begun to discuss how process mining can be applied to knowledge-intensive processes—such as product innovation processes—that involve creative activities, require organizational flexibility, depend on single actors’ decision autonomy, and target process-external goals such as customer satisfaction. Due to these differences, existing Process Mining methods cannot be applied out-of-the-box to analyze knowledge-intensive processes. In this paper, we employ Action Design Research (ADR) to design and evaluate a process mining approach for knowledge-intensive processes. More specifically, we draw on the two processes of product innovation and engineer-to-order in manufacturing contexts. We collected data from 27 interviews and conducted 49 workshops to evaluate our IT artifact at different stages in the ADR process. From a theoretical perspective, we contribute five design principles and a conceptual artifact that prescribe how process mining ought to be designed for knowledge-intensive processes in manufacturing. From a managerial perspective, we demonstrate how enacting these principles enables their application in practice.},
  author    = {Löhr, Bernd and Brennig, Katharina and Bartelheimer, Christian and Beverungen, Daniel and Müller, Oliver},
  booktitle = {International Conference on Business Process Management},
  isbn      = {978-3-031-16103-2},
  title     = {{Process Mining of Knowledge-Intensive Processes: An Action Design Research Study in Manufacturing}},
  doi       = {10.1007/978-3-031-16103-2_18},
  year      = {2022},
}

@inproceedings{25113,
  abstract  = {Our world is more connected than ever before. Sadly, however, this highly connected world has made it easier to bully, insult, and propagate hate speech on the cyberspace. Even though researchers and companies alike have started investigating this real-world problem, the question remains as to why users are increasingly being exposed to hate and discrimination online. In fact, the noticeable and persistent increase in harmful language on social media platforms indicates that the situation is, actually, only getting worse. Hence, in this work, we show that contemporary ML methods can help tackle this challenge in an accurate and cost-effective manner. Our experiments demonstrate that a universal approach combining transfer learning methods and state-of-the-art Transformer architectures can trigger the efficient development of toxic language detection models. Consequently, with this universal approach, we provide platform providers with a simplistic approach capable of enabling the automated moderation of user-generated content, and as a result, hope to contribute to making the web a safer place.},
  author    = {Caron, Matthew and Bäumer, Frederik S. and Müller, Oliver},
  booktitle = {55th Hawaii International Conference on System Sciences (HICSS)},
  location  = {Online},
  title     = {{Towards Automated Moderation: Enabling Toxic Language Detection with Transfer Learning and Attention-Based Models}},
  year      = {2022},
}

@inproceedings{21204,
  author    = {Kucklick, Jan-Peter and Müller, Oliver},
  booktitle = {The AAAI-21 Workshop on Knowledge Discovery from Unstructured Data in Financial Services},
  title     = {{A Comparison of Multi-View Learning Strategies for Satellite Image-based Real Estate Appraisal}},
  year      = {2021},
}

@inproceedings{22514,
  author    = {Kucklick, Jan-Peter and Müller, Jennifer and Beverungen, Daniel and Müller, Oliver},
  booktitle = {European Conference on Information Systems},
  location  = {Virtual},
  title     = {{Quantifying the Impact of Location Data for Real Estate Appraisal – A GIS-based Deep Learning Approach}},
  year      = {2021},
}

@inproceedings{24547,
  abstract  = {Over the last years, several approaches for the data-driven estimation of expected possession value (EPV) in basketball and association football (soccer) have been proposed. In this paper, we develop and evaluate PIVOT: the first such framework for team handball. Accounting for the fast-paced, dynamic nature and relative data scarcity of handball, we propose a parsimonious end-to-end deep learning architecture that relies solely on tracking data. This efficient approach is capable of predicting the probability that a team will score within the near future given the fine-grained spatio-temporal distribution of all players and the ball over the last seconds of the game. Our experiments indicate that PIVOT is able to produce accurate and calibrated probability estimates, even when trained on a relatively small dataset. We also showcase two interactive applications of PIVOT for valuing actual and counterfactual player decisions and actions in real-time.},
  author    = {Müller, Oliver and Caron, Matthew and Döring, Michael and Heuwinkel, Tim and Baumeister, Jochen},
  booktitle = {8th Workshop on Machine Learning and Data Mining for Sports Analytics (ECML PKDD 2021)},
  keywords  = {expected possession value, handball, tracking data, time series classification, deep learning},
  location  = {Online},
  title     = {{PIVOT: A Parsimonious End-to-End Learning Framework for Valuing Player Actions in Handball using Tracking Data}},
  year      = {2021},
}

@inproceedings{25029,
  abstract  = {In early 2021, the finance world was taken by storm by the dramatic price surge of the GameStop Corp. stock. This rise is being, at least in part, attributed to a group of Redditors belonging to the now-famous r/wallstreetbets (WSB) subreddit group. In this work, we set out to address if user activity on the WSB subreddit is associated with the trading volume of the GME stock. Leveraging a unique dataset containing more than 4.9 million WSB posts and comments, we assert that user activity is associated with the trading volume of the GameStop stock. We further show that posts have a significantly higher predictive power than comments and are especially helpful for predicting unusually high trading volume. Lastly, as recent events have shown, we believe that these findings have implications for retail and institutional investors, trading platforms, and policymakers, as these can have disruptive potential.},
  author    = {Caron, Matthew and Gulenko, Maryna and Müller, Oliver},
  booktitle = {42nd International Conference on Information Systems (ICIS 2021)},
  keywords  = {Retail investors, GameStop, Social Networks, Reddit, WallStreetBets},
  location  = {Austin, Texas},
  title     = {{To the Moon! Analyzing the Community of “Degenerates” Engaged in the Surge of the GME Stock}},
  year      = {2021},
}

@inproceedings{26812,
  author    = {Leffrang, Dirk and Müller, Oliver},
  booktitle = {IEEE Workshop on TRust and EXpertise in Visual Analytics},
  title     = {{Should I Follow this Model? The Effect of Uncertainty Visualization on the Acceptance of Time Series Forecasts}},
  doi       = {10.1109/TREX53765.2021.00009},
  year      = {2021},
}

@inproceedings{17348,
  author    = {Kucklick, Jan-Peter and Müller, Oliver},
  booktitle = {Symposium on Statistical Challenges in Electronic Commerce Research (SCECR)},
  title     = {{Location, location, location: Satellite image-based real-estate appraisal}},
  year      = {2020},
}

@inproceedings{17140,
  author    = {Thiess, Tiemo and Müller, Oliver and Tonelli, Lorenzo},
  booktitle = {International Conference on Wirtschaftsinformatik},
  title     = {{Design Principles for Explainable Sales Win-Propensity Prediction Systems}},
  doi       = {10.30844/wi_2020_c8-thiess},
  year      = {2020},
}

@inproceedings{17095,
  abstract  = {In order to sustain their competitive advantage, data driven organizations must continue investing in business intelligence and analytics (BI\&A) while mitigating inherent cost increases. Research shows that examining outlays by individual BI\&A artifact (e.g. reports, analytics) is necessary, but introduction in practice is cumbersome and adoption is slow. BI\&A service-oriented cost allocation (BIASOCA) represents an improvement to this situation. This approach enables to render the BI\&A cost pool accountable and improves cost transparency, which leads to a higher BI\&A penetration of economically viable applications in organizations. Against this background, this paper aims at designing and implementing BIASOCA in a medium-sized company. To record organizational impact and increase customer acceptance, this study is carried out as action design research (ADR). Our findings indicate improvements in BI\&A management from working with consumers to locate cost savings and drivers. After invoicing, consumers’ BI\&A awareness increased, releasing resources while also making a better understanding of BIASOCA necessary. We detail how to implement BIASOCA in a real-life setting and the challenges attendant in so doing. Our research contributes to theory and practice with a set of design principles highlighting, besides the accuracy of cost accounting, the importance of collaboration, model comprehensibility and strategic alignment.},
  author    = {Grytz, Raphael and Krohn-Grimberghe, Artus and Müller, Oliver},
  booktitle = {European Conference on Information Systems},
  title     = {{Business Intelligence \& Analytics Cost Accounting: An Action Design Research Approach}},
  year      = {2020},
}