@inproceedings{25278,
  abstract     = {{Using Service Function Chaining (SFC) in wireless networks became popular in many domains like networking and multimedia. It relies on allocating network resources to incoming SFCs requests, via a Virtual Network Embedding (VNE) algorithm, so that it optimizes the performance of the SFC. When the load of incoming requests -- competing for the limited network resources -- increases, it becomes challenging to decide which requests should be admitted and which one should be rejected. In this work, we propose a deep Reinforcement learning (RL) solution that can learn the admission policy for different dependencies, such as the service lifetime and the priority of incoming requests. We compare the deep RL solution to a first-come-first-serve baseline that admits a request whenever there are available resources. We show that deep RL outperforms the baseline and provides higher acceptance rate with low rejections even when there are enough resources.}},
  author       = {{Afifi, Haitham and Sauer, Fabian Jakob and Karl, Holger}},
  booktitle    = {{2021 IEEE International Conference on Advanced Networks and Telecommunications Systems (ANTS) (ANTS'21)}},
  keywords     = {{reinforcement learning, admission control, wireless sensor networks}},
  title        = {{{Reinforcement Learning for Admission Control in Wireless Virtual Network Embedding}}},
  year         = {{2021}},
}

@inproceedings{25281,
  abstract     = {{Wireless Acoustic Sensor Networks (WASNs) have a wide range of audio signal processing applications. Due to the spatial diversity of the microphone and their relative position to the acoustic source, not all microphones are equally useful for subsequent audio signal processing tasks, nor do they all have the same wireless data transmission rates. Hence, a central task in WASNs is to balance a microphone’s estimated acoustic utility against its transmission delay, selecting a best-possible subset of microphones to record audio signals.

In this work, we use reinforcement learning to decide if a microphone should be used or switched off to maximize the acoustic quality at low transmission delays, while minimizing switching frequency. In experiments with moving sources in a simulated acoustic environment, our method outperforms naive baseline comparisons}},
  author       = {{Afifi, Haitham and Guenther, Michael and Brendel, Andreas and Karl, Holger and Kellermann, Walter}},
  booktitle    = {{14. ITG Conference on Speech Communication (ITG 2021)}},
  keywords     = {{microphone utility, microphone selection, wireless acoustic sensor network, network delay, reinforcement learning}},
  title        = {{{Reinforcement Learning-based Microphone Selection in Wireless Acoustic Sensor Networks considering Network and Acoustic Utilities}}},
  year         = {{2021}},
}

@inproceedings{20125,
  abstract     = {{Datacenter applications have different resource requirements from network and developing flow scheduling heuristics for every workload is practically infeasible. In this paper, we show that deep reinforcement learning (RL) can be used to efficiently learn flow scheduling policies for different workloads without manual feature engineering. Specifically, we present LFS, which learns to optimize a high-level performance objective, e.g., maximize the number of flow admissions while meeting the deadlines. The LFS scheduler is trained through deep RL to learn a scheduling policy on continuous online flow arrivals. The evaluation results show that the trained LFS scheduler admits 1.05x more flows than the greedy flow scheduling heuristics under varying network load.}},
  author       = {{Hasnain, Asif and Karl, Holger}},
  booktitle    = {{2021 IEEE 18th Annual Consumer Communications \& Networking Conference (CCNC)}},
  keywords     = {{Flow scheduling, Deadlines, Reinforcement learning}},
  location     = {{Las Vegas, USA}},
  publisher    = {{IEEE Computer Society}},
  title        = {{{Learning Flow Scheduling}}},
  doi          = {{10.1109/CCNC49032.2021.9369514}},
  year         = {{2021}},
}

@article{28349,
  abstract     = {{Das Auftreten der COVID-19-Pandemie stellt Fremdsprachenkurse vielerorts vor Herausforderungen. Unter Zuhilfenahme diverser digitaler Tools werden nicht nur Lernmaterialien online geteilt, sondern auch die Interaktion zwischen Lehrenden und Lernenden sowie der Lernenden untereinander in den virtuellen Raum verlagert. Qualitative Interviews mit den Beteiligten erfassen, wie diese mit den Herausforderungen videogestützten Sprachunterrichts umgehen und welche Strategien sie wählen, um Sprachenlernen zu ermöglichen. Die Ergebnisse zeigen auf, wo seitens der Kursorganisation und -durchführung Handlungsbedarf besteht.
-----
The rise of the COVID-19 pandemic challenges the teaching and learning of foreign languages at many institutions. The implementation of various digital tools aids not only the online sharing of learning materials, but also shifts teacher-learner and learner-learner interaction to the virtual space. Via qualitative interviews, this study examines how both teachers and learners handle the challenges of language instruction based on videoconferences, and what strategies they employ to enable language learning. The results highlight areas in need of improvement in terms of course organization and facilitation.}},
  author       = {{Drumm, Sandra and Müller, Mareike and Stenzel, Nadja}},
  issn         = {{2511-0853}},
  journal      = {{Informationen Deutsch als Fremdsprache}},
  keywords     = {{German language courses at university, interaction, digital space, language learning/teaching via videoconference}},
  number       = {{5}},
  pages        = {{496--515}},
  title        = {{{Digitale Räume geben und nehmen: Unterrichtsinteraktion in DSH-Kursen während der COVID-19-Pandemie}}},
  doi          = {{10.1515/infodaf-2021-0069}},
  volume       = {{48}},
  year         = {{2021}},
}

@inproceedings{26049,
  abstract     = {{Content is the new oil. Users consume billions of terabytes a day while surfing on news sites or blogs, posting on social media sites, and sending chat messages around the globe. While content is heterogeneous, the dominant form of web content is text. There are situations where more diversity needs to be introduced into text content, for example, to reuse it on websites or to allow a chatbot to base its models on the information conveyed rather than of the language used. In order to achieve this, paraphrasing techniques have been developed: One example is Text spinning, a technique that automatically paraphrases text while leaving the intent intact. This makes it easier to reuse content, or to change the language generated by the bot more human. One method for modifying texts is a combination of translation and back-translation. This paper presents NATTS, a naive approach that uses transformer-based translation models to create diversified text, combining translation steps in one model. An advantage of this approach is that it can be fine-tuned and handle technical language.}},
  author       = {{Bäumer, Frederik Simon and Kersting, Joschka and Denisov, Sergej and Geierhos, Michaela}},
  booktitle    = {{PROCEEDINGS OF THE INTERNATIONAL CONFERENCES ON WWW/INTERNET 2021 AND APPLIED COMPUTING 2021}},
  keywords     = {{Software Requirements, Natural Language Processing, Transfer Learning, On-The-Fly Computing}},
  location     = {{Lisbon, Portugal}},
  pages        = {{221--225}},
  publisher    = {{IADIS}},
  title        = {{{IN OTHER WORDS: A NAIVE APPROACH TO TEXT SPINNING}}},
  year         = {{2021}},
}

@article{21004,
  abstract     = {{Automated machine learning (AutoML) supports the algorithmic construction and data-specific customization of machine learning pipelines, including the selection, combination, and parametrization of machine learning algorithms as main constituents. Generally speaking, AutoML approaches comprise two major components: a search space model and an optimizer for traversing the space. Recent approaches have shown impressive results in the realm of supervised learning, most notably (single-label) classification (SLC). Moreover, first attempts at extending these approaches towards multi-label classification (MLC) have been made. While the space of candidate pipelines is already huge in SLC, the complexity of the search space is raised to an even higher power in MLC. One may wonder, therefore, whether and to what extent optimizers established for SLC can scale to this increased complexity, and how they compare to each other. This paper makes the following contributions: First, we survey existing approaches to AutoML for MLC. Second, we augment these approaches with optimizers not previously tried for MLC. Third, we propose a benchmarking framework that supports a fair and systematic comparison. Fourth, we conduct an extensive experimental study, evaluating the methods on a suite of MLC problems. We find a grammar-based best-first search to compare favorably to other optimizers.}},
  author       = {{Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke}},
  issn         = {{0162-8828}},
  journal      = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}},
  keywords     = {{Automated Machine Learning, Multi Label Classification, Hierarchical Planning, Bayesian Optimization}},
  pages        = {{1--1}},
  title        = {{{AutoML for Multi-Label Classification: Overview and Empirical Evaluation}}},
  doi          = {{10.1109/tpami.2021.3051276}},
  year         = {{2021}},
}

@inproceedings{21005,
  abstract     = {{Data-parallel applications are developed using different data programming models, e.g., MapReduce, partition/aggregate. These models represent diverse resource requirements of application in a datacenter network, which can be represented by the coflow abstraction. The conventional method of creating hand-crafted coflow heuristics for admission or scheduling for different workloads is practically infeasible. In this paper, we propose a deep reinforcement learning (DRL)-based coflow admission scheme -- LCS -- that can learn an admission policy for a higher-level performance objective, i.e., maximize successful coflow admissions, without manual feature engineering.  LCS is trained on a production trace, which has online coflow arrivals. The evaluation results show that LCS is able to learn a reasonable admission policy that admits more coflows than state-of-the-art Varys heuristic while meeting their deadlines.}},
  author       = {{Hasnain, Asif and Karl, Holger}},
  booktitle    = {{IEEE INFOCOM 2021 - IEEE Conference on Computer Communications Workshops (INFOCOM WKSHPS)}},
  keywords     = {{Coflow scheduling, Reinforcement learning, Deadlines}},
  location     = {{Vancouver BC Canada}},
  publisher    = {{IEEE Communications Society}},
  title        = {{{Learning Coflow Admissions}}},
  doi          = {{10.1109/INFOCOMWKSHPS51825.2021.9484599}},
  year         = {{2021}},
}

@inproceedings{21479,
  abstract     = {{Two of the most important metrics when developing Wireless Sensor Networks (WSNs) applications are the Quality of Information (QoI) and Quality of Service (QoS). The former is used to specify the quality of the collected data by the sensors (e.g., measurements error or signal's intensity), while the latter defines the network's performance and availability (e.g., packet losses and latency). In this paper, we consider an example of wireless acoustic sensor networks, where we select a subset of microphones for two different objectives. First, we maximize the recording quality under QoS constraints. Second, we apply a trade-off between QoI and QoS. We formulate the problem as a constrained Markov Decision Problem (MDP) and solve it using reinforcement learning (RL). We compare the RL solution to a baseline model and show that in case of QoS-guarantee objective, the RL solution has an optimality gap up to 1\%. Meanwhile, the RL solution is better than the baseline with improvements up to 23\%, when using the trade-off objective.}},
  author       = {{Afifi, Haitham and Ramaswamy, Arunselvan and Karl, Holger}},
  booktitle    = {{2021 IEEE 18th Annual Consumer Communications \& Networking Conference (CCNC) (CCNC 2021)}},
  keywords     = {{reinforcement learning, wireless sensor networks, resource allocation, acoustic sensor networks}},
  title        = {{{A Reinforcement Learning QoI/QoS-Aware Approach in Acoustic Sensor Networks}}},
  year         = {{2021}},
}

@inproceedings{21543,
  abstract     = {{Services often consist of multiple chained components such as microservices in a service mesh, or machine learning functions in a pipeline. Providing these services requires online coordination including scaling the service, placing instance of all components in the network, scheduling traffic to these instances, and routing traffic through the network. Optimized service coordination is still a hard problem due to many influencing factors such as rapidly arriving user demands and limited node and link capacity. Existing approaches to solve the problem are often built on rigid models and assumptions, tailored to specific scenarios. If the scenario changes and the assumptions no longer hold, they easily break and require manual adjustments by experts. Novel self-learning approaches using deep reinforcement learning (DRL) are promising but still have limitations as they only address simplified versions of the problem and are typically centralized and thus do not scale to practical large-scale networks.

To address these issues, we propose a distributed self-learning service coordination approach using DRL. After centralized training, we deploy a distributed DRL agent at each node in the network, making fast coordination decisions locally in parallel with the other nodes. Each agent only observes its direct neighbors and does not need global knowledge. Hence, our approach scales independently from the size of the network. In our extensive evaluation using real-world network topologies and traffic traces, we show that our proposed approach outperforms a state-of-the-art conventional heuristic as well as a centralized DRL approach (60% higher throughput on average) while requiring less time per online decision (1 ms).}},
  author       = {{Schneider, Stefan Balthasar and Qarawlus, Haydar and Karl, Holger}},
  booktitle    = {{IEEE International Conference on Distributed Computing Systems (ICDCS)}},
  keywords     = {{network management, service management, coordination, reinforcement learning, distributed}},
  location     = {{Washington, DC, USA}},
  publisher    = {{IEEE}},
  title        = {{{Distributed Online Service Coordination Using Deep Reinforcement Learning}}},
  year         = {{2021}},
}

@inproceedings{22481,
  abstract     = {{During the industrial processing of materials for the manufacture of new products, surface defects can quickly occur. In order to achieve high quality without a long time delay, it makes sense to inspect the work pieces so that defective work pieces can be sorted out right at the beginning of the process. At the same time, the evaluation unit should come close the perception of the human eye regarding detection of defects in surfaces. Such defects often manifest themselves by a deviation of the existing structure. The only restriction should be that only matt surfaces should be considered here. Therefore in this work, different classification and image processing algorithms are applied to surface data to identify possible surface damages. For this purpose, the Gabor filter and the FST (Fused Structure and Texture) features generated with it, as well as the salience metric are used on the image processing side. On the classification side, however, deep neural networks, Convolutional Neural Networks (CNN), and autoencoders are used to make a decision. A distinction is also made between training using class labels and without. It turns out later that the salience metric are best performed by CNN. On the other hand, if there is no labeled training data available, a novelty classification can easily be achieved by using autoencoders as well as the salience metric and some filters.}},
  author       = {{Sander, Tom and Lange, Sven and Hilleringmann, Ulrich and Geneis, Volker and Hedayat, Christian and Kuhn, Harald and Gockel, Franz-Barthold}},
  booktitle    = {{22nd IEEE International Conference on Industrial Technology (ICIT)}},
  isbn         = {{9781728157306}},
  keywords     = {{Image Processing, Defect Detection, wooden surfaces, Machine Learning, Neural Networks}},
  location     = {{Valencia, Spain }},
  publisher    = {{IEEE}},
  title        = {{{Detection of Defects on Irregular Structured Surfaces by Image Processing Methods for Feature Extraction}}},
  doi          = {{10.1109/icit46573.2021.9453646}},
  year         = {{2021}},
}

@article{21808,
  abstract     = {{Modern services consist of interconnected components,e.g., microservices in a service mesh or machine learning functions in a pipeline. These services can scale and run across multiple network nodes on demand. To process incoming traffic, service components have to be instantiated and traffic assigned to these instances, taking capacities, changing demands, and Quality of Service (QoS) requirements into account. This challenge is usually solved with custom approaches designed by experts. While this typically works well for the considered scenario, the models often rely on unrealistic assumptions or on knowledge that is not available in practice (e.g., a priori knowledge).

We propose DeepCoord, a novel deep reinforcement learning approach that learns how to best coordinate services and is geared towards realistic assumptions. It interacts with the network and relies on available, possibly delayed monitoring information. Rather than defining a complex model or an algorithm on how to achieve an objective, our model-free approach adapts to various objectives and traffic patterns. An agent is trained offline without expert knowledge and then applied online with minimal overhead. Compared to a state-of-the-art heuristic, DeepCoord significantly improves flow throughput (up to 76%) and overall network utility (more than 2x) on realworld network topologies and traffic traces. It also supports optimizing multiple, possibly competing objectives, learns to respect QoS requirements, generalizes to scenarios with unseen, stochastic traffic, and scales to large real-world networks. For reproducibility and reuse, our code is publicly available.}},
  author       = {{Schneider, Stefan Balthasar and Khalili, Ramin and Manzoor, Adnan and Qarawlus, Haydar and Schellenberg, Rafael and Karl, Holger and Hecker, Artur}},
  journal      = {{Transactions on Network and Service Management}},
  keywords     = {{network management, service management, coordination, reinforcement learning, self-learning, self-adaptation, multi-objective}},
  publisher    = {{IEEE}},
  title        = {{{Self-Learning Multi-Objective Service Coordination Using Deep Reinforcement Learning}}},
  doi          = {{10.1109/TNSM.2021.3076503}},
  year         = {{2021}},
}

@inproceedings{27381,
  abstract     = {{Graph neural networks (GNNs) have been successfully applied in many structured data domains, with applications ranging from molecular property prediction to the analysis of social networks. Motivated by the broad applicability of GNNs, we propose the family of so-called RankGNNs, a combination of neural Learning to Rank (LtR) methods and GNNs. RankGNNs are trained with a set of pair-wise preferences between graphs, suggesting that one of them is preferred over the other. One practical application of this problem is drug screening, where an expert wants to find the most promising molecules in a large collection of drug candidates. We empirically demonstrate that our proposed pair-wise RankGNN approach either significantly outperforms or at least matches the ranking performance of the naive point-wise baseline approach, in which the LtR problem is solved via GNN-based graph regression.}},
  author       = {{Damke, Clemens and Hüllermeier, Eyke}},
  booktitle    = {{Proceedings of The 24th International Conference on Discovery Science (DS 2021)}},
  editor       = {{Soares, Carlos and Torgo, Luis}},
  isbn         = {{9783030889418}},
  issn         = {{0302-9743}},
  keywords     = {{Graph-structured data, Graph neural networks, Preference learning, Learning to rank}},
  location     = {{Halifax, Canada}},
  pages        = {{166--180}},
  publisher    = {{Springer}},
  title        = {{{Ranking Structured Objects with Graph Neural Networks}}},
  doi          = {{10.1007/978-3-030-88942-5}},
  volume       = {{12986}},
  year         = {{2021}},
}

@article{20212,
  abstract     = {{Ideational impact refers to the uptake of a paper's ideas and concepts by subsequent research. It is defined in stark contrast to total citation impact, a measure predominantly used in research evaluation that assumes that all citations are equal. Understanding ideational impact is critical for evaluating research impact and understanding how scientific disciplines build a cumulative tradition. Research has only recently developed automated citation classification techniques to distinguish between different types of citations and generally does not emphasize the conceptual content of the citations and its ideational impact. To address this problem, we develop Deep Content-enriched Ideational Impact Classification (Deep-CENIC) as the first automated approach for ideational impact classification to support researchers' literature search practices. We evaluate Deep-CENIC on 1,256 papers citing 24 information systems review articles from the IT business value domain. We show that Deep-CENIC significantly outperforms state-of-the-art benchmark models. We contribute to information systems research by operationalizing the concept of ideational impact, designing a recommender system for academic papers based on deep learning techniques, and empirically exploring the ideational impact of the IT business value domain.
}},
  author       = {{Prester, Julian and Wagner, Gerit and Schryen, Guido and Hassan, Nik Rushdi}},
  journal      = {{Decision Support Systems}},
  keywords     = {{Ideational impact, citation classification, academic recommender systems, natural language processing, deep learning, cumulative tradition}},
  number       = {{January}},
  title        = {{{Classifying the Ideational Impact of Information Systems Review Articles: A Content-Enriched Deep Learning Approach}}},
  volume       = {{140}},
  year         = {{2021}},
}

@techreport{33854,
  abstract     = {{Macrodiversity is a key technique to increase the capacity of mobile networks. It can be realized using coordinated multipoint (CoMP), simultaneously connecting users to multiple overlapping cells. Selecting which users to serve by how many and which cells is NP-hard but needs to happen continuously in real time as users move and channel state changes. Existing approaches often require strict assumptions about or perfect knowledge of the underlying radio system, its resource allocation scheme, or user movements, none of which is readily available in practice.

Instead, we propose three novel self-learning and self-adapting approaches using model-free deep reinforcement learning (DRL): DeepCoMP, DD-CoMP, and D3-CoMP. DeepCoMP leverages central observations and control of all users to select cells almost optimally. DD-CoMP and D3-CoMP use multi-agent DRL, which allows distributed, robust, and highly scalable coordination. All three approaches learn from experience and self-adapt to varying scenarios, reaching 2x higher Quality of Experience than other approaches. They have very few built-in assumptions and do not need prior system knowledge, making them more robust to change and better applicable in practice than existing approaches.}},
  author       = {{Schneider, Stefan Balthasar and Karl, Holger and Khalili, Ramin and Hecker, Artur}},
  keywords     = {{mobility management, coordinated multipoint, CoMP, cell selection, resource management, reinforcement learning, multi agent, MARL, self-learning, self-adaptation, QoE}},
  title        = {{{DeepCoMP: Coordinated Multipoint Using Multi-Agent Deep Reinforcement Learning}}},
  year         = {{2021}},
}

@inbook{29102,
  abstract     = {{Das Studium der Wirtschaftspädagogik bereitet Studierende auf das didaktische Handeln in beruflichen Lehr-Lernkontexten (u. a. berufliche Schulen, Ausbildung in Betrieben) vor. Theorie-Praxis-Verzahnung ist somit aus zwei Perspektiven zu modellieren: Einerseits geht es um den Aufbau eines fachwissenschaftlichen Verständnisses, welches von den Handlungszusammenhängen in einer beruflichen Domäne mit kaufmännisch-verwaltenden Bezügen ausgeht und weniger auf einer rein fachwissenschaftlichen Bildung beruht. Die zukünftige Berufspraxis der Schülerinnen und Schüler muss in den Blick genommen werden. Andererseits geht es um die Professionalisierung als pädagogisches Personal, welches berufsbezogene Lernprozesse fachdidaktisch gestalten kann. Die zukünftige Lehrpraxis in beruflichen Lehr-Lernkontexten ist in den Blick zu nehmen. Zielstellung des Beitrages ist es, diese doppelte Theorie-Praxis-Verzahnung als Konstitutionsmerkmal der Wirtschaftspädagogik aufzuzeigen (Abschn. 2), um darauf basierend anhand von Theorien des Lernens am Arbeitsplatz Potenziale und Grenzen des Lernortes Praxis als Beitrag zur Professionalisierung angehender Wirtschaftspädagog*innen im Studium herauszuarbeiten (Abschn. 3). Am Beispiel des Konzeptes von Universitätsschulen soll eine Umsetzungsvariante zur Theorie-Praxis-Verzahnung unter Herausarbeitung der Potenziale der jeweiligen Lernorte Schule und Universität aufgezeigt werden (Abschn. 4).}},
  author       = {{Gerholz, Karl-Heinz and Goller, Michael}},
  booktitle    = {{Edition Fachdidaktiken}},
  editor       = {{Caruso, Carina and Harteis, Christian and Gröschner, Alexander}},
  isbn         = {{9783658325671}},
  issn         = {{2524-8677}},
  keywords     = {{Berufliche Lehrerbildung, Professional Learning, Theorie-Praxis-Verzahnung, Wirtschaftspädagogik, Universitätsschulen}},
  pages        = {{393--419}},
  publisher    = {{Springer}},
  title        = {{{Theorie-Praxis-Verzahnung in der Wirtschaftspädagogik: Potenziale und Grenzen des Lernortes Praxis}}},
  doi          = {{10.1007/978-3-658-32568-8_22}},
  year         = {{2021}},
}

@article{35202,
  abstract     = {{Purpose: This study aims at investigating how digitalisation (in the sense of industry 4.0) has changed the work of farmers and how they experience the changes from more traditional work to digitalised agriculture. It also investigates what knowledge farmers require on digitalised farms and how they acquire it. Dairy farming was used as domain of investigation since it, unlike other industries, has strongly been affected by digitalisation throughout the last years.

Method: Exploratory interviews with 10 livestock farmers working on digitalised dairy farms were analysed using qualitative content analysis. A deductive and inductive coding strategy was used. 

Findings: Farming work has changed from more manual tasks towards symbol manipulation and data processing. Farmers must be able to use computers and other digital devices to retrieve and analyse sensor data that allow them to monitor and control the processes on their farm. For this new kind of work, farmers require elaborated mental models that link traditional farming knowledge with knowledge about digital systems, including a strong understanding of production processes underlying their farm. Learning is mostly based on instructions offered by manufacturers of the new technology as well as informal and non-formal learning modes. Even younger farmers report that digital technology was not sufficiently covered in their (vocational) degrees. In general, farmers emphasises the positive effects of digitalisation both on their working as well as private life. 

Conclusions: Farmers should be aware of the opportunities as well as the potential drawbacks of the digitalisation of work processes in agriculture. Providers of agricultural education (like vocational schools or training institutes) need to incorporate the knowledge and skills required to work in digitalised environments (e.g., data literacy) in their syllabi. Further studies are required to assess how digitalisation changes farming practices and what knowledge as well as skills linked to these developments are required in the future.}},
  author       = {{Goller, Michael and Caruso, Carina and Harteis, Christian}},
  issn         = {{2197-8646}},
  journal      = {{International Journal for Research in Vocational Education and Training}},
  keywords     = {{Work-Based Learning, Organisational Change, Digital Competences, Qualitative Research, Digitalisation, Farming, Dairy, VET, Vocational Education and Training}},
  number       = {{2}},
  pages        = {{208--223}},
  title        = {{{Digitalisation in Agriculture: Knowledge and Learning Requirements of German Dairy Farmers}}},
  doi          = {{10.13152/IJRVET.8.2.4}},
  volume       = {{8}},
  year         = {{2021}},
}

@inbook{35464,
  abstract     = {{The digital transformation of organizations in the industrial sector is primarily driven by the opportunity to increase productivity while simultaneously reducing costs through integration into a cyber-physical system. One way to fully tap the potential of a cyber-physical system is the concept of the digital twin, i.e., the real-time digital representation of machines and resources involved – including human resources. The vision of representing humans by digital twins primarily aims at increasing economic benefits. The digital twin of a human, however, cannot be designed in a similar way to that of a machine. The human digital twin shall rather enable humans to act within the cyber-physical system. It therefore offers humans a power of control and the opportunity to provide feedback. The concept of the digital twin is still in its infancy and raises many questions in particular from an educational perspective. The contribution aims at answering the following questions and refers to the example of team learning: Which and how much data should and may the digital twin contain in order to support humans in their learning? To what extent will humans be able to control and design their own learning? How may skills, experiences, and social interactions of humans be represented in the digital twin; their growth and further development, respectively? With cyber-physical systems transcending corporate, national, and legal boundaries, what learning culture will be the frame of reference for the involved organizations?}},
  author       = {{Berisha-Gawlowski,  Angelina and Caruso, Carina and Harteis, Christian}},
  booktitle    = {{Digital Transformation of Learning Organizations  }},
  editor       = {{Ifenthaler, Dirk and Hofhues, Sandra and Egloffstein, Marc and Helbig, Christian}},
  isbn         = {{978-3-030-55877-2}},
  keywords     = {{Digital twin, Learning organization, Change, Team learning, Professional development}},
  pages        = {{95--114}},
  publisher    = {{Springer}},
  title        = {{{The Concept of a Digital Twin and Its Potential for Learning Organizations}}},
  doi          = {{10.1007/978-3-030-55878-9_6}},
  year         = {{2021}},
}

@techreport{35889,
  abstract     = {{Network and service coordination is important to provide modern services consisting of multiple interconnected components, e.g., in 5G, network function virtualization (NFV), or cloud and edge computing. In this paper, I outline my dissertation research, which proposes six approaches to automate such network and service coordination. All approaches dynamically react to the current demand and optimize coordination for high service quality and low costs. The approaches range from centralized to distributed methods and from conventional heuristic algorithms and mixed-integer linear programs to machine learning approaches using supervised and reinforcement learning. I briefly discuss their main ideas and advantages over other state-of-the-art approaches and compare strengths and weaknesses.}},
  author       = {{Schneider, Stefan Balthasar}},
  keywords     = {{nfv, coordination, machine learning, reinforcement learning, phd, digest}},
  title        = {{{Conventional and Machine Learning Approaches for Network and Service Coordination}}},
  year         = {{2021}},
}

@techreport{37136,
  abstract     = {{This study examines the relation between voluntary audit and the cost of debt in private firms. We use a sample of 4,058 small private firms operating in the period 2006--2017 that are not subject to mandatory audits. Firms decide for a voluntary audit of financial statements either because the economic setting in which they operate effectively forces them to do so (e.g., ownership complexity, export-oriented supply chain, subsidiary status) or because firm fundamentals and/or financial reporting practices limit their access to financial debt, both reflected in earnings quality. We use these factors to model the decision for voluntary audit. In the outcome analyses, we find robust evidence that voluntary audits are associated with higher, rather than lower, interest rate by up to 3.0 percentage points. This effect is present regardless of the perceived audit quality (Big-4 vs. non-Big-4), but is stronger for non-Big-4 audits where auditees have a stronger position relative to auditors. Audited firms’ earnings are less informative about future operating performance relative to unaudited counterparts. We conclude that voluntary audits facilitate access to financial debt for firms with higher risk that may otherwise have no access to this form of financing. The price paid is reflected in higher interest rates charged to firms with voluntary audits – firms with higher information and/or fundamental risk.}},
  author       = {{Ichev, Riste and Koren, Jernej and Kosi, Urska and Sitar Sustar, Katarina and Valentincic, Aljosa}},
  keywords     = {{private firms, voluntary audit, cost of debt, self-selection bias, risk}},
  title        = {{{Cost of Debt for Private Firms Revisited: Voluntary Audits as a Reflection of Risk}}},
  year         = {{2021}},
}

@inproceedings{24547,
  abstract     = {{Over the last years, several approaches for the data-driven estimation of expected possession value (EPV) in basketball and association football (soccer) have been proposed. In this paper, we develop and evaluate PIVOT: the first such framework for team handball. Accounting for the fast-paced, dynamic nature and relative data scarcity of handball, we propose a parsimonious end-to-end deep learning architecture that relies solely on tracking data. This efficient approach is capable of predicting the probability that a team will score within the near future given the fine-grained spatio-temporal distribution of all players and the ball over the last seconds of the game. Our experiments indicate that PIVOT is able to produce accurate and calibrated probability estimates, even when trained on a relatively small dataset. We also showcase two interactive applications of PIVOT for valuing actual and counterfactual player decisions and actions in real-time.}},
  author       = {{Müller, Oliver and Caron, Matthew and Döring, Michael and Heuwinkel, Tim and Baumeister, Jochen}},
  booktitle    = {{8th Workshop on Machine Learning and Data Mining for Sports Analytics (ECML PKDD 2021)}},
  keywords     = {{expected possession value, handball, tracking data, time series classification, deep learning}},
  location     = {{Online}},
  title        = {{{PIVOT: A Parsimonious End-to-End Learning Framework for Valuing Player Actions in Handball using Tracking Data}}},
  year         = {{2021}},
}

