@article{44077, author = {{Maack, Marten}}, issn = {{0167-6377}}, journal = {{Operations Research Letters}}, keywords = {{Applied Mathematics, Industrial and Manufacturing Engineering, Management Science and Operations Research, Software}}, number = {{3}}, pages = {{220--225}}, publisher = {{Elsevier BV}}, title = {{{Online load balancing on uniform machines with limited migration}}}, doi = {{10.1016/j.orl.2023.02.013}}, volume = {{51}}, year = {{2023}}, } @inbook{45895, author = {{Karl, Holger and Maack, Marten and Meyer auf der Heide, Friedhelm and Pukrop, Simon and Redder, Adrian}}, booktitle = {{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}, editor = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{183--202}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{On-The-Fly Compute Centers II: Execution of Composed Services in Configurable Compute Centers}}}, doi = {{10.5281/zenodo.8068664}}, volume = {{412}}, year = {{2023}}, } @book{45863, abstract = {{In the proposal for our CRC in 2011, we formulated a vision of markets for IT services that describes an approach to the provision of such services that was novel at that time and, to a large extent, remains so today: „Our vision of on-the-fly computing is that of IT services individually and automatically configured and brought to execution from flexibly combinable services traded on markets. At the same time, we aim at organizing markets whose participants maintain a lively market of services through appropriate entrepreneurial actions.“ Over the last 12 years, we have developed methods and techniques to address problems critical to the convenient, efficient, and secure use of on-the-fly computing. Among other things, we have made the description of services more convenient by allowing natural language input, increased the quality of configured services through (natural language) interaction and more efficient configuration processes and analysis procedures, made the quality of (the products of) providers in the marketplace transparent through reputation systems, and increased the resource efficiency of execution through reconfigurable heterogeneous computing nodes and an integrated treatment of service description and configuration. We have also developed network infrastructures that have a high degree of adaptivity, scalability, efficiency, and reliability, and provide cryptographic guarantees of anonymity and security for market participants and their products and services. To demonstrate the pervasiveness of the OTF computing approach, we have implemented a proof-of-concept for OTF computing that can run typical scenarios of an OTF market. We illustrated the approach using a cutting-edge application scenario – automated machine learning (AutoML). Finally, we have been pushing our work for the perpetuation of On-The-Fly Computing beyond the SFB and sharing the expertise gained in the SFB in events with industry partners as well as transfer projects. This work required a broad spectrum of expertise. 
Computer scientists and economists with research interests such as computer networks and distributed algorithms, security and cryptography, software engineering and verification, configuration and machine learning, computer engineering and HPC, microeconomics and game theory, business informatics and management have successfully collaborated here.}}, author = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{247}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}}, doi = {{10.17619/UNIPB/1-1797}}, volume = {{412}}, year = {{2023}}, } @article{50458, abstract = {{Consider a set of jobs connected to a directed acyclic task graph with a fixed source and sink. The edges of this graph model precedence constraints and the jobs have to be scheduled with respect to those. We introduce the server cloud scheduling problem, in which the jobs have to be processed either on a single local machine or on one of infinitely many cloud machines. For each job, processing times both on the server and in the cloud are given. Furthermore, for each edge in the task graph, a communication delay is included in the input and has to be taken into account if one of the two jobs is scheduled on the server and the other in the cloud. The server processes jobs sequentially, whereas the cloud can serve as many as needed in parallel, but induces costs. We consider both makespan and cost minimization. The main results are an FPTAS for the makespan objective for graphs with a constant source and sink dividing cut and strong hardness for the case with unit processing times and delays.}}, author = {{Maack, Marten and Meyer auf der Heide, Friedhelm and Pukrop, Simon}}, issn = {{0178-4617}}, journal = {{Algorithmica}}, keywords = {{Applied Mathematics, Computer Science Applications, General Computer Science}}, publisher = {{Springer Science and Business Media LLC}}, title = {{{Server Cloud Scheduling}}}, doi = {{10.1007/s00453-023-01189-x}}, year = {{2023}}, } @inproceedings{50460, author = {{Deppert, Max A. and Jansen, Klaus and Maack, Marten and Pukrop, Simon and Rau, Malin}}, booktitle = {{2023 IEEE International Parallel and Distributed Processing Symposium (IPDPS)}}, publisher = {{IEEE}}, title = {{{Scheduling with Many Shared Resources}}}, doi = {{10.1109/ipdps54959.2023.00049}}, year = {{2023}}, } @inproceedings{30236, abstract = {{Recent reinforcement learning approaches for continuous control in wireless mobile networks have shown impressive results. But due to the lack of open and compatible simulators, authors typically create their own simulation environments for training and evaluation. This is cumbersome and time-consuming for authors and limits reproducibility and comparability, ultimately impeding progress in the field. To this end, we propose mobile-env, a simple and open platform for training, evaluating, and comparing reinforcement learning and conventional approaches for continuous control in mobile wireless networks. mobile-env is lightweight and implements the common OpenAI Gym interface and additional wrappers, which allows connecting virtually any single-agent or multi-agent reinforcement learning framework to the environment. While mobile-env provides sensible default values and can be used out of the box, it also has many configuration options and is easy to extend. 
We therefore believe mobile-env to be a valuable platform for driving meaningful progress in autonomous coordination of wireless mobile networks.}}, author = {{Schneider, Stefan Balthasar and Werner, Stefan and Khalili, Ramin and Hecker, Artur and Karl, Holger}}, booktitle = {{IEEE/IFIP Network Operations and Management Symposium (NOMS)}}, keywords = {{wireless mobile networks, network management, continuous control, cognitive networks, autonomous coordination, reinforcement learning, gym environment, simulation, open source}}, location = {{Budapest}}, publisher = {{IEEE}}, title = {{{mobile-env: An Open Platform for Reinforcement Learning in Wireless Mobile Networks}}}, year = {{2022}}, } @inproceedings{33085, author = {{Epstein, Leah and Lassota, Alexandra and Levin, Asaf and Maack, Marten and Rohwedder, Lars}}, booktitle = {{39th International Symposium on Theoretical Aspects of Computer Science, STACS 2022, March 15-18, 2022, Marseille, France (Virtual Conference)}}, editor = {{Berenbrink, Petra and Monmege, Benjamin}}, pages = {{28:1--28:15}}, publisher = {{Schloss Dagstuhl - Leibniz-Zentrum für Informatik}}, title = {{{Cardinality Constrained Scheduling in Online Models}}}, doi = {{10.4230/LIPIcs.STACS.2022.28}}, volume = {{219}}, year = {{2022}}, } @inproceedings{32811, abstract = {{The decentralized nature of multi-agent systems requires continuous data exchange to achieve global objectives. In such scenarios, Age of Information (AoI) has become an important metric of the freshness of exchanged data due to the error-proneness and delays of communication systems. Communication systems usually possess dependencies: the process describing the success or failure of communication is highly correlated when these attempts are ``close'' in some domain (e.g. in time, frequency, space or code as in wireless communication) and is, in general, non-stationary. To study AoI in such scenarios, we consider an abstract event-based AoI process $\Delta(n)$, expressing time since the last update: If, at time $n$, a monitoring node receives a status update from a source node (event $A(n-1)$ occurs), then $\Delta(n)$ is reset to one; otherwise, $\Delta(n)$ grows linearly in time. This AoI process can thus be viewed as a special random walk with resets. The event process $A(n)$ may be nonstationary and we merely assume that its temporal dependencies decay sufficiently, described by $\alpha$-mixing. We calculate moment bounds for the resulting AoI process as a function of the mixing rate of $A(n)$. Furthermore, we prove that the AoI process $\Delta(n)$ is itself $\alpha$-mixing from which we conclude a strong law of large numbers for $\Delta(n)$. These results are new, since AoI processes have not been studied so far in this general strongly mixing setting. 
This opens up future work on renewal processes with non-independent interarrival times.}}, author = {{Redder, Adrian and Ramaswamy, Arunselvan and Karl, Holger}}, booktitle = {{Proceedings of the 58th Allerton Conference on Communication, Control, and Computing}}, title = {{{Age of Information Process under Strongly Mixing Communication -- Moment Bound, Mixing Rate and Strong Law}}}, year = {{2022}}, } @inproceedings{30793, author = {{Redder, Adrian and Ramaswamy, Arunselvan and Karl, Holger}}, booktitle = {{Proceedings of the 14th International Conference on Agents and Artificial Intelligence}}, publisher = {{SCITEPRESS - Science and Technology Publications}}, title = {{{Multi-agent Policy Gradient Algorithms for Cyber-physical Systems with Lossy Communication}}}, doi = {{10.5220/0010845400003116}}, year = {{2022}}, } @unpublished{30790, abstract = {{Iterative distributed optimization algorithms involve multiple agents that communicate with each other, over time, in order to minimize/maximize a global objective. In the presence of unreliable communication networks, the Age-of-Information (AoI), which measures the freshness of data received, may be large and hence hinder algorithmic convergence. In this paper, we study the convergence of general distributed gradient-based optimization algorithms in the presence of communication that neither happens periodically nor at stochastically independent points in time. We show that convergence is guaranteed provided the random variables associated with the AoI processes are stochastically dominated by a random variable with finite first moment. This improves on previous requirements of boundedness of more than the first moment. We then introduce stochastically strongly connected (SSC) networks, a new stochastic form of strong connectedness for time-varying networks. We show: If for any $p \ge 0$ the processes that describe the success of communication between agents in an SSC network are $\alpha$-mixing with $n^{p-1}\alpha(n)$ summable, then the associated AoI processes are stochastically dominated by a random variable with finite $p$-th moment. In combination with our first contribution, this implies that distributed stochastic gradient descent converges in the presence of AoI, if $\alpha(n)$ is summable.}}, author = {{Redder, Adrian and Ramaswamy, Arunselvan and Karl, Holger}}, booktitle = {{arXiv:2201.11343}}, title = {{{Distributed gradient-based optimization in the presence of dependent aperiodic communication}}}, year = {{2022}}, } @unpublished{30791, abstract = {{We present sufficient conditions that ensure convergence of the multi-agent Deep Deterministic Policy Gradient (DDPG) algorithm. It is an example of one of the most popular paradigms of Deep Reinforcement Learning (DeepRL) for tackling continuous action spaces: the actor-critic paradigm. In the setting considered herein, each agent observes a part of the global state space in order to take local actions, for which it receives local rewards. For every agent, DDPG trains a local actor (policy) and a local critic (Q-function). The analysis shows that multi-agent DDPG using neural networks to approximate the local policies and critics converges to limits with the following properties: The critic limits minimize the average squared Bellman loss; the actor limits parameterize a policy that maximizes the local critic's approximation of $Q_i^*$, where $i$ is the agent index. The averaging is with respect to a probability distribution over the global state-action space. 
It captures the asymptotics of all local training processes. Finally, we extend the analysis to a fully decentralized setting where agents communicate over a wireless network prone to delays and losses; a typical scenario in, e.g., robotic applications.}}, author = {{Redder, Adrian and Ramaswamy, Arunselvan and Karl, Holger}}, booktitle = {{arXiv:2201.00570}}, title = {{{Asymptotic Convergence of Deep Multi-Agent Actor-Critic Algorithms}}}, year = {{2022}}, } @article{32854, author = {{Redder, Adrian and Ramaswamy, Arunselvan and Karl, Holger}}, journal = {{IFAC-PapersOnLine}}, number = {{13}}, pages = {{133--138}}, publisher = {{Elsevier}}, title = {{{Practical Network Conditions for the Convergence of Distributed Optimization}}}, volume = {{55}}, year = {{2022}}, } @inproceedings{33491, author = {{Maack, Marten and Pukrop, Simon and Rasmussen, Anna Rodriguez}}, booktitle = {{30th Annual European Symposium on Algorithms, ESA 2022, September 5-9, 2022, Berlin/Potsdam, Germany}}, editor = {{Chechik, Shiri and Navarro, Gonzalo and Rotenberg, Eva and Herman, Grzegorz}}, pages = {{77:1--77:13}}, publisher = {{Schloss Dagstuhl - Leibniz-Zentrum für Informatik}}, title = {{{(In-)Approximability Results for Interval, Resource Restricted, and Low Rank Scheduling}}}, doi = {{10.4230/LIPIcs.ESA.2022.77}}, volume = {{244}}, year = {{2022}}, } @inproceedings{29220, abstract = {{Modern services often comprise several components, such as chained virtual network functions, microservices, or machine learning functions. Providing such services requires deciding how often to instantiate each component, where to place these instances in the network, how to chain them and route traffic through them. To overcome limitations of conventional, hardwired heuristics, deep reinforcement learning (DRL) approaches for self-learning network and service management have emerged recently. These model-free DRL approaches are more flexible but typically learn tabula rasa, i.e., disregard existing understanding of networks, services, and their coordination. Instead, we propose FutureCoord, a novel model-based AI approach that leverages existing understanding of networks and services for more efficient and effective coordination without time-intensive training. FutureCoord combines Monte Carlo Tree Search with a stochastic traffic model. This allows FutureCoord to estimate the impact of future incoming traffic and effectively optimize long-term effects, taking fluctuating demand and Quality of Service (QoS) requirements into account. 
Our extensive evaluation based on real-world network topologies, services, and traffic traces indicates that FutureCoord clearly outperforms state-of-the-art model-free and model-based approaches with up to 51% higher flow success ratios.}}, author = {{Werner, Stefan and Schneider, Stefan Balthasar and Karl, Holger}}, booktitle = {{IEEE/IFIP Network Operations and Management Symposium (NOMS)}}, keywords = {{network management, service management, AI, Monte Carlo Tree Search, model-based, QoS}}, location = {{Budapest}}, publisher = {{IEEE}}, title = {{{Use What You Know: Network and Service Coordination Beyond Certainty}}}, year = {{2022}}, } @inbook{29872, author = {{Maack, Marten and Meyer auf der Heide, Friedhelm and Pukrop, Simon}}, booktitle = {{Approximation and Online Algorithms}}, isbn = {{9783030927011}}, issn = {{0302-9743}}, publisher = {{Springer International Publishing}}, title = {{{Server Cloud Scheduling}}}, doi = {{10.1007/978-3-030-92702-8_10}}, year = {{2022}}, } @inproceedings{20125, abstract = {{Datacenter applications have different resource requirements from the network, and developing flow scheduling heuristics for every workload is practically infeasible. In this paper, we show that deep reinforcement learning (RL) can be used to efficiently learn flow scheduling policies for different workloads without manual feature engineering. Specifically, we present LFS, which learns to optimize a high-level performance objective, e.g., maximize the number of flow admissions while meeting the deadlines. The LFS scheduler is trained through deep RL to learn a scheduling policy on continuous online flow arrivals. The evaluation results show that the trained LFS scheduler admits 1.05x more flows than the greedy flow scheduling heuristics under varying network load.}}, author = {{Hasnain, Asif and Karl, Holger}}, booktitle = {{2021 IEEE 18th Annual Consumer Communications \& Networking Conference (CCNC)}}, keywords = {{Flow scheduling, Deadlines, Reinforcement learning}}, location = {{Las Vegas, USA}}, publisher = {{IEEE Computer Society}}, title = {{{Learning Flow Scheduling}}}, doi = {{10.1109/CCNC49032.2021.9369514}}, year = {{2021}}, } @inproceedings{21005, abstract = {{Data-parallel applications are developed using different data programming models, e.g., MapReduce, partition/aggregate. These models represent diverse resource requirements of applications in a datacenter network, which can be represented by the coflow abstraction. The conventional method of creating hand-crafted coflow heuristics for admission or scheduling for different workloads is practically infeasible. In this paper, we propose a deep reinforcement learning (DRL)-based coflow admission scheme -- LCS -- that can learn an admission policy for a higher-level performance objective, i.e., maximize successful coflow admissions, without manual feature engineering. LCS is trained on a production trace, which has online coflow arrivals. 
The evaluation results show that LCS is able to learn a reasonable admission policy that admits more coflows than the state-of-the-art Varys heuristic while meeting their deadlines.}}, author = {{Hasnain, Asif and Karl, Holger}}, booktitle = {{IEEE INFOCOM 2021 - IEEE Conference on Computer Communications Workshops (INFOCOM WKSHPS)}}, keywords = {{Coflow scheduling, Reinforcement learning, Deadlines}}, location = {{Vancouver, BC, Canada}}, publisher = {{IEEE Communications Society}}, title = {{{Learning Coflow Admissions}}}, doi = {{10.1109/INFOCOMWKSHPS51825.2021.9484599}}, year = {{2021}}, } @inproceedings{21543, abstract = {{Services often consist of multiple chained components such as microservices in a service mesh, or machine learning functions in a pipeline. Providing these services requires online coordination including scaling the service, placing instances of all components in the network, scheduling traffic to these instances, and routing traffic through the network. Optimized service coordination is still a hard problem due to many influencing factors such as rapidly arriving user demands and limited node and link capacity. Existing approaches to solve the problem are often built on rigid models and assumptions, tailored to specific scenarios. If the scenario changes and the assumptions no longer hold, they easily break and require manual adjustments by experts. Novel self-learning approaches using deep reinforcement learning (DRL) are promising but still have limitations as they only address simplified versions of the problem and are typically centralized and thus do not scale to practical large-scale networks. To address these issues, we propose a distributed self-learning service coordination approach using DRL. After centralized training, we deploy a distributed DRL agent at each node in the network, making fast coordination decisions locally in parallel with the other nodes. Each agent only observes its direct neighbors and does not need global knowledge. Hence, our approach scales independently of the size of the network. In our extensive evaluation using real-world network topologies and traffic traces, we show that our proposed approach outperforms a state-of-the-art conventional heuristic as well as a centralized DRL approach (60% higher throughput on average) while requiring less time per online decision (1 ms).}}, author = {{Schneider, Stefan Balthasar and Qarawlus, Haydar and Karl, Holger}}, booktitle = {{IEEE International Conference on Distributed Computing Systems (ICDCS)}}, keywords = {{network management, service management, coordination, reinforcement learning, distributed}}, location = {{Washington, DC, USA}}, publisher = {{IEEE}}, title = {{{Distributed Online Service Coordination Using Deep Reinforcement Learning}}}, year = {{2021}}, } @inproceedings{20693, abstract = {{In practical, large-scale networks, services are requested by users across the globe, e.g., for video streaming. Services consist of multiple interconnected components such as microservices in a service mesh. Coordinating these services requires scaling them according to continuously changing user demand, deploying instances at the edge close to their users, and routing traffic efficiently between users and connected instances. Network and service coordination is commonly addressed through centralized approaches, where a single coordinator knows everything and coordinates the entire network globally. 
While such centralized approaches can reach global optima, they do not scale to large, realistic networks. In contrast, distributed approaches scale well, but sacrifice solution quality due to their limited scope of knowledge and coordination decisions. To this end, we propose a hierarchical coordination approach that combines the good solution quality of centralized approaches with the scalability of distributed approaches. In doing so, we divide the network into multiple hierarchical domains and optimize coordination in a top-down manner. We compare our hierarchical approach with a centralized approach in an extensive evaluation on a real-world network topology. Our results indicate that hierarchical coordination can find close-to-optimal solutions in a fraction of the runtime of centralized approaches.}}, author = {{Schneider, Stefan Balthasar and Jürgens, Mirko and Karl, Holger}}, booktitle = {{IFIP/IEEE International Symposium on Integrated Network Management (IM)}}, keywords = {{network management, service management, coordination, hierarchical, scalability, nfv}}, location = {{Bordeaux, France}}, publisher = {{IFIP/IEEE}}, title = {{{Divide and Conquer: Hierarchical Network and Service Coordination}}}, year = {{2021}}, } @article{21808, abstract = {{Modern services consist of interconnected components, e.g., microservices in a service mesh or machine learning functions in a pipeline. These services can scale and run across multiple network nodes on demand. To process incoming traffic, service components have to be instantiated and traffic assigned to these instances, taking capacities, changing demands, and Quality of Service (QoS) requirements into account. This challenge is usually solved with custom approaches designed by experts. While this typically works well for the considered scenario, the models often rely on unrealistic assumptions or on knowledge that is not available in practice (e.g., a priori knowledge). We propose DeepCoord, a novel deep reinforcement learning approach that learns how to best coordinate services and is geared towards realistic assumptions. It interacts with the network and relies on available, possibly delayed monitoring information. Rather than defining a complex model or an algorithm on how to achieve an objective, our model-free approach adapts to various objectives and traffic patterns. An agent is trained offline without expert knowledge and then applied online with minimal overhead. Compared to a state-of-the-art heuristic, DeepCoord significantly improves flow throughput (up to 76%) and overall network utility (more than 2x) on real-world network topologies and traffic traces. It also supports optimizing multiple, possibly competing objectives, learns to respect QoS requirements, generalizes to scenarios with unseen, stochastic traffic, and scales to large real-world networks. For reproducibility and reuse, our code is publicly available.}}, author = {{Schneider, Stefan Balthasar and Khalili, Ramin and Manzoor, Adnan and Qarawlus, Haydar and Schellenberg, Rafael and Karl, Holger}}, journal = {{Transactions on Network and Service Management}}, keywords = {{network management, service management, coordination, reinforcement learning, self-learning, self-adaptation, multi-objective}}, publisher = {{IEEE}}, title = {{{Self-Learning Multi-Objective Service Coordination Using Deep Reinforcement Learning}}}, doi = {{10.1109/TNSM.2021.3076503}}, year = {{2021}}, }