@misc{1186,
author = {Kemper, Arne},
publisher = {Universität Paderborn},
title = {{Pure Nash Equilibria in Robust Congestion Games via Potential Functions}},
year = {2018},
}
@article{1369,
abstract = {In budget games, players compete over resources with finite budgets. For every resource, a player has a specific demand and as a strategy, he chooses a subset of resources. If the total demand on a resource does not exceed its budget, the utility of each player who chose that resource equals his demand. Otherwise, the budget is shared proportionally. In the general case, pure Nash equilibria (NE) do not exist for such games. In this paper, we consider the natural classes of singleton and matroid budget games with additional constraints and show that for each, pure NE can be guaranteed. In addition, we introduce a lexicographical potential function to prove that every matroid budget game has an approximate pure NE which depends on the largest ratio between the different demands of each individual player.},
author = {Drees, Maximilian and Feldotto, Matthias and Riechers, Sören and Skopalik, Alexander},
issn = {1382-6905},
journal = {Journal of Combinatorial Optimization},
publisher = {Springer Nature},
title = {{Pure Nash equilibria in restricted budget games}},
doi = {10.1007/s10878-018-0269-7},
year = {2018},
}
@inproceedings{4565,
author = {Jung, Daniel and Kolb, Christina and Scheideler, Christian and Sundermeier, Jannik},
booktitle = {Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures (SPAA)},
isbn = {9781450357999},
location = {Wien},
publisher = {ACM Press},
title = {{Brief Announcement: Competitive Routing in Hybrid Communication Networks}},
doi = {10.1145/3210377.3210663},
year = {2018},
}
@article{2848,
author = {Li, Shouwei and Markarian, Christine and Meyer auf der Heide, Friedhelm},
journal = {Algorithmica},
number = {5},
pages = {1556--1574},
publisher = {Springer},
title = {{Towards Flexible Demands in Online Leasing Problems}},
doi = {10.1007/s00453-018-0420-y},
volume = {80},
year = {2018},
}
@inproceedings{4375,
abstract = {We present a peer-to-peer network that supports the efficient processing of orthogonal range queries $R=\bigtimes_{i=1}^{d}[a_i,\,b_i]$ in a $d$-dimensional point space.\\
The network is the same for each dimension, namely a distance halving network like the one introduced by Naor and Wieder (ACM TALG'07).
We show how to execute such range queries using $\mathcal{O}\left(2^{d'}d\,\log m + d\,|R|\right)$ hops (and the same number of messages) in total. Here $[m]^d$ is the ground set, $|R|$ is the size and $d'$ the dimension of the queried range.
Furthermore, if the peers form a distributed network, the query can be answered in $\mathcal{O}\left(d\,\log m + d\,\sum_{i=1}^{d}(b_i-a_i+1)\right)$ communication rounds.
Our algorithms are based on a mapping of the Hilbert Curve through $[m]^d$ to the peers.},
author = {Benter, Markus and Knollmann, Till and Meyer auf der Heide, Friedhelm and Setzer, Alexander and Sundermeier, Jannik},
booktitle = {Proceedings of the 4th International Symposium on Algorithmic Aspects of Cloud Computing (ALGOCLOUD)},
keyword = {Distributed Storage, Multi-Dimensional Range Queries, Peer-to-Peer, Hilbert Curve},
location = {Helsinki},
title = {{A Peer-to-Peer based Cloud Storage supporting orthogonal Range Queries of arbitrary Dimension}},
doi = {10.1007/978-3-030-19759-9_4},
year = {2018},
}
@misc{1187,
author = {Nachtigall, Marcel},
publisher = {Universität Paderborn},
title = {{Scenario-driven Strategy Analysis in an n-player Composition Game Model}},
year = {2018},
}
@article{669,
abstract = {We study a new class of games which generalizes congestion games and its bottleneck variant. We introduce congestion games with mixed objectives to model network scenarios in which players seek to optimize for latency and bandwidths alike. We characterize the (non-)existence of pure Nash equilibria (PNE), the convergence of improvement dynamics, the quality of equilibria and show the complexity of the decision problem. For games that do not possess PNE we give bounds on the approximation ratio of approximate pure Nash equilibria.},
author = {Feldotto, Matthias and Leder, Lennart and Skopalik, Alexander},
issn = {1382-6905},
journal = {Journal of Combinatorial Optimization},
number = {4},
pages = {1145--1167},
publisher = {Springer Nature},
title = {{Congestion games with mixed objectives}},
doi = {10.1007/s10878-017-0189-y},
volume = {36},
year = {2018},
}
@article{3551,
author = {König, Jürgen and Mäcker, Alexander and Meyer auf der Heide, Friedhelm and Riechers, Sören},
journal = {Journal of Combinatorial Optimization},
number = {4},
pages = {1356--1379},
title = {{Scheduling with interjob communication on parallel processors}},
doi = {10.1007/s10878-018-0325-3},
volume = {36},
year = {2018},
}
@article{2849,
author = {Abu-Khzam, Faisal N. and Markarian, Christine and Meyer auf der Heide, Friedhelm and Schubert, Michael},
journal = {Theory of Computing Systems},
publisher = {Springer},
title = {{Approximation and Heuristic Algorithms for Computing Backbones in Asymmetric Ad-hoc Networks}},
doi = {10.1007/s00224-017-9836-z},
year = {2018},
}
@inproceedings{2484,
abstract = {We study the classic bin packing problem in a fully-dynamic setting, where new items can arrive and old items may depart. We want algorithms with low asymptotic competitive ratio while repacking items sparingly between updates. Formally, each item i has a movement cost c_i >= 0, and we want to use alpha * OPT bins and incur a movement cost gamma * c_i, either in the worst case, or in an amortized sense, for alpha, gamma as small as possible. We call gamma the recourse of the algorithm. This is motivated by cloud storage applications, where fully-dynamic bin packing models the problem of data backup to minimize the number of disks used, as well as communication incurred in moving file backups between disks. Since the set of files changes over time, we could recompute a solution periodically from scratch, but this would give a high number of disk rewrites, incurring a high energy cost and possible wear and tear of the disks. In this work, we present optimal tradeoffs between number of bins used and number of items repacked, as well as natural extensions of the latter measure.},
author = {Feldkord, Björn and Feldotto, Matthias and Gupta, Anupam and Guruganesh, Guru and Kumar, Amit and Riechers, Sören and Wajc, David},
booktitle = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)},
editor = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, Dániel and Sannella, Donald},
isbn = {978-3-95977-076-7},
issn = {1868-8969},
location = {Prag},
pages = {51:1--51:24},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
title = {{Fully-Dynamic Bin Packing with Little Repacking}},
doi = {10.4230/LIPIcs.ICALP.2018.51},
volume = {107},
year = {2018},
}
@misc{1188,
author = {Kempf, Jérôme},
publisher = {Universität Paderborn},
title = {{Learning deterministic bandit behaviour from compositions}},
year = {2018},
}
@inproceedings{4411,
abstract = {While a lot of research in distributed computing has covered solutions for self-stabilizing computing and topologies, there is far less work on self-stabilization for distributed data structures.
Considering crashing peers in peer-to-peer networks, it should not be taken for granted that a distributed data structure remains intact.
In this work, we present a self-stabilizing protocol for a distributed data structure called the hashed Patricia Trie (Kniesburges and Scheideler WALCOM'11) that enables efficient prefix search on a set of keys.
The data structure has a wide area of applications including string matching problems while offering low overhead and efficient operations when embedded on top of a distributed hash table.
Especially, longest prefix matching for $x$ can be done in $\mathcal{O}(\log |x|)$ hash table read accesses.
We show how to maintain the structure in a self-stabilizing way.
Our protocol assures low overhead in a legal state and a total (asymptotically optimal) memory demand of $\Theta(d)$ bits, where $d$ is the number of bits needed for storing all keys.},
author = {Knollmann, Till and Scheideler, Christian},
booktitle = {Proceedings of the 20th International Symposium on Stabilization, Safety, and Security of Distributed Systems (SSS)},
editor = {Izumi, Taisuke and Kuznetsov, Petr},
keyword = {Self-Stabilizing, Prefix Search, Distributed Data Structure},
location = {Tokyo},
publisher = {Springer, Cham},
title = {{A Self-Stabilizing Hashed Patricia Trie}},
doi = {10.1007/978-3-030-03232-6_1},
volume = {11201},
year = {2018},
}
@inproceedings{2485,
author = {Feldkord, Björn and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 30th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
location = {Wien},
pages = {373--381},
publisher = {ACM},
title = {{Online Facility Location with Mobile Facilities}},
doi = {10.1145/3210377.3210389},
year = {2018},
}
@misc{3851,
author = {Koop, Samuel},
publisher = {Universität Paderborn},
title = {{Congestion Games mit gewichteten Strategien}},
year = {2018},
}
@phdthesis{1209,
author = {Jung, Daniel},
publisher = {Universität Paderborn},
title = {{Local Strategies for Swarm Formations on a Grid}},
doi = {10.17619/UNIPB/1-271},
year = {2018},
}
@inproceedings{4563,
abstract = {Routing is a challenging problem for wireless ad hoc networks, especially when the nodes are mobile and spread so widely that in most cases multiple hops are needed to route a message from one node to another. In fact, it is known that any online routing protocol has a poor performance in the worst case, in a sense that there is a distribution of nodes resulting in bad routing paths for that protocol, even if the nodes know their geographic positions and the geographic position of the destination of a message is known. The reason for that is that radio holes in the ad hoc network may require messages to take long detours in order to get to a destination, which are hard to find in an online fashion.
In this paper, we assume that the wireless ad hoc network can make limited use of long-range links provided by a global communication infrastructure like a cellular infrastructure or a satellite in order to compute an abstraction of the wireless ad hoc network that allows the messages to be sent along near-shortest paths in the ad hoc network. We present distributed algorithms that compute an abstraction of the ad hoc network in $\mathcal{O}\left(\log ^2 n\right)$ time using long-range links, which results in $c$-competitive routing paths between any two nodes of the ad hoc network for some constant $c$ if the convex hulls of the radio holes do not intersect. We also show that the storage needed for the abstraction just depends on the number and size of the radio holes in the wireless ad hoc network and is independent on the total number of nodes, and this information just has to be known to a few nodes for the routing to work.
},
author = {Jung, Daniel and Kolb, Christina and Scheideler, Christian and Sundermeier, Jannik},
booktitle = {Proceedings of the 14th International Symposium on Algorithms and Experiments for Wireless Networks (ALGOSENSORS) },
keyword = {greedy routing, ad hoc networks, convex hulls, c-competitiveness},
location = {Helsinki},
publisher = {Springer},
title = {{Competitive Routing in Hybrid Communication Networks}},
year = {2018},
}
@misc{5403,
author = {Geromel, Marcel},
title = {{Mobile Facility Leasing}},
year = {2018},
}
@inproceedings{112,
abstract = {We study a model of selfish resource allocation that seeks to incorporate dependencies among resources as they exist in modern networked environments. Our model is inspired by utility functions with constant elasticity of substitution (CES) which is a well-studied model in economics. We consider congestion games with different aggregation functions. In particular, we study $L_p$ norms and analyze the existence and complexity of (approximate) pure Nash equilibria. Additionally, we give an almost tight characterization based on monotonicity properties to describe the set of aggregation functions that guarantee the existence of pure Nash equilibria.},
author = {Feldotto, Matthias and Leder, Lennart and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Algorithms and Complexity (CIAC)},
pages = {222--233},
title = {{Congestion Games with Complementarities}},
doi = {10.1007/978-3-319-57586-5_19},
year = {2017},
}
@article{706,
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
journal = {Journal of Combinatorial Optimization},
number = {4},
pages = {1168--1194},
publisher = {Springer},
title = {{Cost-efficient Scheduling on Machines from the Cloud}},
doi = {10.1007/s10878-017-0198-x},
volume = {36},
year = {2017},
}
@inproceedings{66,
abstract = {In budget games, players compete over resources with finite budgets. For every resource, a player has a specific demand and as a strategy, he chooses a subset of resources. If the total demand on a resource does not exceed its budget, the utility of each player who chose that resource equals his demand. Otherwise, the budget is shared proportionally. In the general case, pure Nash equilibria (NE) do not exist for such games. In this paper, we consider the natural classes of singleton and matroid budget games with additional constraints and show that for each, pure NE can be guaranteed. In addition, we introduce a lexicographical potential function to prove that every matroid budget game has an approximate pure NE which depends on the largest ratio between the different demands of each individual player.},
author = {Drees, Maximilian and Feldotto, Matthias and Riechers, Sören and Skopalik, Alexander},
booktitle = {Proceedings of the 23rd International Computing and Combinatorics Conference (COCOON)},
pages = {175--187},
title = {{Pure Nash Equilibria in Restricted Budget Games}},
doi = {10.1007/978-3-319-62389-4_15},
year = {2017},
}
@inproceedings{59,
abstract = {We consider a scheduling problem on $m$ identical processors sharing an arbitrarily divisible resource. In addition to assigning jobs to processors, the scheduler must distribute the resource among the processors (e.g., for three processors in shares of 20\%, 15\%, and 65\%) and adjust this distribution over time. Each job $j$ comes with a size $p_j \in \mathbb{R}$ and a resource requirement $r_j > 0$. Jobs do not benefit when receiving a share larger than $r_j$ of the resource. But providing them with a fraction of the resource requirement causes a linear decrease in the processing efficiency. We seek a (non-preemptive) job and resource assignment minimizing the makespan. Our main result is an efficient approximation algorithm which achieves an approximation ratio of $2 + 1/(m-2)$. It can be improved to an (asymptotic) ratio of $1 + 1/(m-1)$ if all jobs have unit size. Our algorithms also imply new results for a well-known bin packing problem with splittable items and a restricted number of allowed item parts per bin. Based upon the above solution, we also derive an approximation algorithm with similar guarantees for a setting in which we introduce so-called tasks each containing several jobs and where we are interested in the average completion time of tasks (a task is completed when all its jobs are completed).},
author = {Kling, Peter and Mäcker, Alexander and Riechers, Sören and Skopalik, Alexander},
booktitle = {Proceedings of the 29th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {123--132},
title = {{Sharing is Caring: Multiprocessor Scheduling with a Sharable Resource}},
doi = {10.1145/3087556.3087578},
year = {2017},
}
@misc{1073,
author = {Nachtigall, Simon},
publisher = {Universität Paderborn},
title = {{Sortieren dynamischer Daten}},
year = {2017},
}
@misc{1080,
author = {Bürmann, Jan},
publisher = {Universität Paderborn},
title = {{Complexity of Signalling in Routing Games under Uncertainty}},
year = {2017},
}
@inproceedings{113,
abstract = {We study the computation of approximate pure Nash equilibria in Shapley value (SV) weighted congestion games, introduced in [19]. This class of games considers weighted congestion games in which Shapley values are used as an alternative (to proportional shares) for distributing the total cost of each resource among its users. We focus on the interesting subclass of such games with polynomial resource cost functions and present an algorithm that computes approximate pure Nash equilibria with a polynomial number of strategy updates. Since computing a single strategy update is hard, we apply sampling techniques which allow us to achieve polynomial running time. The algorithm builds on the algorithmic ideas of [7], however, to the best of our knowledge, this is the first algorithmic result on computation of approximate equilibria using other than proportional shares as player costs in this setting. We present a novel relation that approximates the Shapley value of a player by her proportional share and vice versa. As side results, we upper bound the approximate price of anarchy of such games and significantly improve the best known factor for computing approximate pure Nash equilibria in weighted congestion games of [7].},
author = {Feldotto, Matthias and Gairing, Martin and Kotsialou, Grammateia and Skopalik, Alexander},
booktitle = {Proceedings of the 13th International Conference on Web and Internet Economics (WINE)},
title = {{Computing Approximate Pure Nash Equilibria in Shapley Value Weighted Congestion Games}},
doi = {10.1007/978-3-319-71924-5_14},
year = {2017},
}
@inproceedings{79,
abstract = {Consider a problem in which $n$ jobs that are classified into $k$ types arrive over time at their release times and are to be scheduled on a single machine so as to minimize the maximum flow time. The machine requires a setup taking $s$ time units whenever it switches from processing jobs of one type to jobs of a different type. We consider the problem as an online problem where each job is only known to the scheduler as soon as it arrives and where the processing time of a job only becomes known upon its completion (non-clairvoyance). We are interested in the potential of simple ``greedy-like'' algorithms. We analyze a modification of the FIFO strategy and show its competitiveness to be $\Theta(\sqrt{n})$, which is optimal for the considered class of algorithms. For $k=2$ types it achieves a constant competitiveness. Our main insight is obtained by an analysis of the smoothed competitiveness. If processing times $p_j$ are independently perturbed to $\hat p_j = (1+X_j)p_j$, we obtain a competitiveness of $O(\sigma^{-2} \log^2 n)$ when $X_j$ is drawn from a uniform or a (truncated) normal distribution with standard deviation $\sigma$. The result proves that bad instances are fragile and ``practically'' one might expect a much better performance than given by the $\Omega(\sqrt{n})$-bound.},
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Proceedings of the 15th Workshop on Approximation and Online Algorithms (WAOA)},
pages = {207--222},
publisher = {Springer},
title = {{Non-Clairvoyant Scheduling to Minimize Max Flow Time on a Machine with Setup Times}},
doi = {10.1007/978-3-319-89441-6},
volume = {10787},
year = {2017},
}
@inproceedings{55,
abstract = {We introduce the mobile server problem, inspired by current trends to move computational tasks from cloud structures to multiple devices close to the end user. An example for this are embedded systems in autonomous cars that communicate in order to coordinate their actions. Our model is a variant of the classical Page Migration Problem. More formally, we consider a mobile server holding a data page. The server can move in the Euclidean space (of arbitrary dimension). In every round, requests for data items from the page pop up at arbitrary points in the space. The requests are served, each at a cost of the distance from the requesting point and the server, and the mobile server may move, at a cost $D$ times the distance traveled for some constant $D$. We assume a maximum distance $m$ the server is allowed to move per round. We show that no online algorithm can achieve a competitive ratio independent of the length of the input sequence in this setting. Hence we augment the maximum movement distance of the online algorithms to $(1 + \delta)$ times the maximum distance of the offline solution. We provide a deterministic algorithm which is simple to describe and works for multiple variants of our problem. The algorithm achieves almost tight competitive ratios independent of the length of the input sequence.},
author = {Feldkord, Björn and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 29th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {313--319},
title = {{The Mobile Server Problem}},
doi = {10.1145/3087556.3087575},
year = {2017},
}
@inproceedings{2851,
author = {Markarian, Christine},
booktitle = {International Conference on Operations Research (OR)},
location = {Berlin},
title = {{Leasing with Uncertainty}},
doi = {10.1007/978-3-319-89920-6_57},
year = {2017},
}
@misc{1074,
author = {Pukrop, Simon},
publisher = {Universität Paderborn},
title = {{Robuste Optimierung in Congestion Games}},
year = {2017},
}
@misc{1081,
author = {Vijayalakshmi, Vipin Ravindran},
publisher = {Universität Paderborn},
title = {{Bounding the Inefficiency of Equilibria in Congestion Games under Taxation}},
year = {2017},
}
@phdthesis{703,
author = {Podlipyan, Pavel},
publisher = {Universität Paderborn},
title = {{Local Algorithms for the Continuous Gathering Problem}},
year = {2017},
}
@inproceedings{82,
abstract = {Many graph problems such as maximum cut, chromatic number, hamiltonian cycle, and edge dominating set are known to be fixed-parameter tractable (FPT) when parameterized by the treewidth of the input graphs, but become W-hard with respect to the clique-width parameter. Recently, Gajarský et al. proposed a new parameter called modular-width using the notion of modular decomposition of graphs. They showed that the chromatic number problem and the partitioning into paths problem, and hence hamiltonian path and hamiltonian cycle, are FPT when parameterized by this parameter. In this paper, we study modular-width in parameterized parallel complexity and show that the weighted maximum clique problem and the maximum matching problem are fixed-parameter parallel-tractable (FPPT) when parameterized by this parameter.},
author = {Abu-Khzam, Faisal N. and Li, Shouwei and Markarian, Christine and Meyer auf der Heide, Friedhelm and Podlipyan, Pavel},
booktitle = {Proceedings of the 11th International Workshop on Frontiers in Algorithmics (FAW)},
pages = {139--150},
title = {{Modular-Width: An Auxiliary Parameter for Parameterized Parallel Complexity}},
doi = {10.1007/978-3-319-59605-1_13},
year = {2017},
}
@article{63,
author = {Althaus, Ernst and Brinkmann, Andre and Kling, Peter and Meyer auf der Heide, Friedhelm and Nagel, Lars and Riechers, Sören and Sgall, Jiri and Suess, Tim},
journal = {Journal of Scheduling},
publisher = {Springer},
title = {{Scheduling Shared Continuous Resources on Many-Cores}},
doi = {10.1007/s10951-017-0518-0},
year = {2017},
}
@misc{695,
author = {Nowack, Joshua},
publisher = {Universität Paderborn},
title = {{On-The-Fly Konstruktion zusammenhängender Straßennetze aus gegebenen Einzelteilen}},
year = {2017},
}
@inproceedings{70,
author = {Feldkord, Björn and Markarian, Christine and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 11th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {17--31},
title = {{Price Fluctuations in Online Leasing}},
doi = {10.1007/978-3-319-71147-8_2},
year = {2017},
}
@inproceedings{1094,
abstract = {Many university students struggle with motivational problems, and gamification has the potential to address these problems. However, gamification is hardly used in education, because current approaches to gamification require instructors to engage in the time-consuming preparation of their course contents for use in quizzes, mini-games and the like. Drawing on research on limited attention and present bias, we propose a "lean" approach to gamification, which relies on gamifying learning activities (rather than learning contents) and increasing their salience. In this paper, we present the app StudyNow that implements such a lean gamification approach. With this app, we aim to enable more students and instructors to benefit from the advantages of gamification.},
author = {Feldotto, Matthias and John, Thomas and Kundisch, Dennis and Hemsen, Paul and Klingsieck, Katrin and Skopalik, Alexander},
booktitle = {Proceedings of the 12th International Conference on Design Science Research in Information Systems and Technology (DESRIST)},
pages = {462--467},
title = {{Making Gamification Easy for the Professor: Decoupling Game and Content with the StudyNow Mobile App}},
doi = {10.1007/978-3-319-59144-5_32},
year = {2017},
}
@phdthesis{704,
author = {Riechers, Sören},
publisher = {Universität Paderborn},
title = {{Scheduling with Scarce Resources}},
doi = {10.17619/UNIPB/1-231},
year = {2017},
}
@article{110,
abstract = {We consider an extension of the dynamic speed scaling scheduling model introduced by Yao et al.: A set of jobs, each with a release time, deadline, and workload, has to be scheduled on a single, speed-scalable processor. Both the maximum allowed speed of the processor and the energy costs may vary continuously over time. The objective is to find a feasible schedule that minimizes the total energy costs. Theoretical algorithm design for speed scaling problems often tends to discretize problems, as our tools in the discrete realm are often better developed or understood. Using the above speed scaling variant with variable, continuous maximal processor speeds and energy prices as an example, we demonstrate that a more direct approach via tools from variational calculus can not only lead to a very concise and elegant formulation and analysis, but also avoids the “explosion of variables/constraints” that often comes with discretizing. Using well-known tools from calculus of variations, we derive combinatorial optimality characteristics for our continuous problem and provide a quite concise and simple correctness proof.},
author = {Antoniadis, Antonios and Kling, Peter and Ott, Sebastian and Riechers, Sören},
journal = {Theoretical Computer Science},
pages = {1--13},
publisher = {Elsevier},
title = {{Continuous Speed Scaling with Variability: A Simple and Direct Approach}},
doi = {10.1016/j.tcs.2017.03.021},
year = {2017},
}
@inproceedings{1095,
abstract = {Many university students struggle with motivational problems, and gamification has the potential to address these problems. However, using gamification currently is rather tedious and time-consuming for instructors because current approaches to gamification require instructors to engage in the time-consuming preparation of course contents (e.g., for quizzes or mini-games). In reply to this issue, we propose a “lean” approach to gamification, which relies on gamifying learning activities rather than learning contents. The learning activities that are gamified in the lean approach can typically be drawn from existing course syllabi (e.g., attend certain lectures, hand in assignments, read book chapters and articles). Hence, compared to existing approaches, lean gamification substantially lowers the time requirements posed on instructors for gamifying a given course. Drawing on research on limited attention and the present bias, we provide the theoretical foundation for the lean gamification approach. In addition, we present a mobile application that implements lean gamification and outline a mixed-methods study that is currently under way for evaluating whether lean gamification does indeed have the potential to increase students’ motivation. We thereby hope to allow more students and instructors to benefit from the advantages of gamification. },
author = {John, Thomas and Feldotto, Matthias and Hemsen, Paul and Klingsieck, Katrin and Kundisch, Dennis and Langendorf, Mike},
booktitle = {Proceedings of the 25th European Conference on Information Systems (ECIS)},
pages = {2970--2979},
title = {{Towards a Lean Approach for Gamifying Education}},
year = {2017},
}
@inproceedings{143,
abstract = {We present an efficient parallel algorithm for the general Monotone Circuit Value Problem (MCVP) with n gates and an underlying graph of bounded genus k. Our algorithm generalizes a recent result by Limaye et al. who showed that MCVP with toroidal embedding (genus 1) is in NC when the input contains a toroidal embedding of the circuit. In addition to extending this result from genus 1 to any bounded genus k, and unlike the work reported by Limaye et al., we do not require a precomputed embedding to be given. Most importantly, our results imply that given a P-complete problem, it is possible to find an algorithm that makes the problem fall into NC by fixing one or more parameters. Hence, we deduce the interesting analogy: Fixed Parameter Parallelizable (FPP) is with respect to P-complete what Fixed Parameter Tractable (FPT) is with respect to NP-complete. Similar work that uses treewidth as parameter was also presented by Elberfeld et al. in [6].},
author = {Abu-Khzam, Faisal N. and Li, Shouwei and Markarian, Christine and Meyer auf der Heide, Friedhelm and Podlipyan, Pavel},
booktitle = {Proceedings of the 22nd International Conference on Computing and Combinatorics (COCOON)},
pages = {92--102},
title = {{The Monotone Circuit Value Problem with Bounded Genus Is in NC}},
doi = {10.1007/978-3-319-42634-1_8},
year = {2016},
}
@inproceedings{207,
abstract = {We consider a scheduling problem where machines need to be rented from the cloud in order to process jobs. There are two types of machines available which can be rented for machine-type dependent prices and for arbitrary durations. However, a machine-type dependent setup time is required before a machine is available for processing. Jobs arrive online over time, have machine-type dependent sizes and have individual deadlines. The objective is to rent machines and schedule jobs so as to meet all deadlines while minimizing the rental cost. Since we observe the slack of jobs to have a fundamental influence on the competitiveness, we study the model when instances are parameterized by their (minimum) slack. An instance is called to have a slack of $\beta$ if, for all jobs, the difference between the job's release time and the latest point in time at which it needs to be started is at least $\beta$.},
series = {LNCS},
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {578--592},
title = {{Cost-efficient Scheduling on Machines from the Cloud}},
doi = {10.1007/978-3-319-48749-6_42},
year = {2016},
}
@proceedings{163,
editor = {Dressler, Falko and Meyer auf der Heide, Friedhelm},
location = {Paderborn, Germany},
publisher = {ACM},
title = {{Proceedings of the 17th ACM International Symposium on Mobile Ad Hoc Networking and Computing (MobiHoc)}},
doi = {10.1145/2942358},
year = {2016},
}
@misc{187,
booktitle = {Transactions on Parallel Computing (TOPC)},
editor = {Meyer auf der Heide, Friedhelm},
number = {1},
pages = {1},
title = {{Introduction to the Special Issue on SPAA 2014}},
doi = {10.1145/2936716},
year = {2016},
}
@inproceedings{149,
abstract = {In this paper we consider a strategic variant of the online facility location problem. Given is a graph in which each node serves two roles: it is a strategic client stating requests as well as a potential location for a facility. In each time step one client states a request which induces private costs equal to the distance to the closest facility. Before serving, the clients may collectively decide to open new facilities, sharing the corresponding price. Instead of optimizing the global costs, each client acts selfishly. The prices of new facilities vary between nodes and also change over time, but are always bounded by some fixed value α. Both the requests as well as the facility prices are given by an online sequence and are not known in advance.We characterize the optimal strategies of the clients and analyze their overall performance in comparison to a centralized offline solution. If all players optimize their own competitiveness, the global performance of the system is O(√α⋅α) times worse than the offline optimum. A restriction to a natural subclass of strategies improves this result to O(α). We also show that for fixed facility costs, we can find strategies such that this bound further improves to O(√α).},
author = {Drees, Maximilian and Feldkord, Björn and Skopalik, Alexander},
booktitle = {Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {593--607},
title = {{Strategic Online Facility Location}},
doi = {10.1007/978-3-319-48749-6_43},
year = {2016},
}
@misc{210,
author = {Leder, Lennart},
publisher = {Universität Paderborn},
title = {{Congestion Games with Mixed Objectives}},
year = {2016},
}
@article{145,
abstract = {Comparative evaluations of peer-to-peer protocols through simulations are a viable approach to judge the performance and costs of the individual protocols in large-scale networks. In order to support this work, we present the peer-to-peer system simulator PeerfactSim.KOM, which we extended over the last years. PeerfactSim.KOM comes with an extensive layer model to support various facets and protocols of peer-to-peer networking. In this article, we describe PeerfactSim.KOM and show how it can be used for detailed measurements of large-scale peer-to-peer networks. We enhanced PeerfactSim.KOM with a fine-grained analyzer concept, with exhaustive automated measurements and gnuplot generators as well as a coordination control to evaluate sets of experiment setups in parallel. Thus, by configuring all experiments and protocols only once and starting the simulator, all desired measurements are performed, analyzed, evaluated, and combined, resulting in a holistic environment for the comparative evaluation of peer-to-peer systems. An immediate comparison of different configurations and overlays under different aspects is possible directly after the execution without any manual post-processing. },
author = {Feldotto, Matthias and Graffi, Kalman},
journal = {Concurrency and Computation: Practice and Experience},
number = {5},
pages = {1655--1677},
publisher = {Wiley Online Library},
title = {{Systematic evaluation of peer-to-peer systems using PeerfactSim.KOM}},
doi = {10.1002/cpe.3716},
volume = {28},
year = {2016},
}
@inproceedings{157,
abstract = {Consider a scheduling problem in which a set of jobs with interjob communication, canonically represented by a weighted tree, needs to be scheduled on m parallel processors interconnected by a shared communication channel. In each time step, we may allow any processed job to use a certain capacity of the channel in order to satisfy (parts of) its communication demands to adjacent jobs processed in parallel. The goal is to find a schedule that minimizes the makespan and in which communication demands of all jobs are satisfied.We show that this problem is NP-hard in the strong sense even if the number of processors and the maximum degree of the underlying tree is constant.Consequently, we design and analyze simple approximation algorithms with asymptotic approximation ratio 2-2/m in case of paths and a ratio of 5/2 in case of arbitrary trees.},
author = {König, Jürgen and Mäcker, Alexander and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {563--577},
title = {{Scheduling with Interjob Communication on Parallel Processors}},
doi = {10.1007/978-3-319-48749-6_41},
year = {2016},
}
@misc{5406,
author = {Bülling, Jonas},
title = {{Parallelisierung von Algorithmen zur IR-Luftbildanalyse von Laubholzmischbeständen zur Verifizierung der Ausbreitung von Eichenkomplexschäden}},
year = {2016},
}
@misc{688,
author = {Kutzias, Damian},
publisher = {Universität Paderborn},
title = {{Friendship Processes in Network Creation Games}},
year = {2016},
}
@inproceedings{209,
abstract = {We study a new class of games which generalizes congestion games and its bottleneck variant. We introduce congestion games with mixed objectives to model network scenarios in which players seek to optimize for latency and bandwidths alike. We characterize the existence of pure Nash equilibria (PNE) and the convergence of improvement dynamics. For games that do not possess PNE we give bounds on the approximation ratio of approximate pure Nash equilibria.},
author = {Feldotto, Matthias and Leder, Lennart and Skopalik, Alexander},
booktitle = {Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {655--669},
title = {{Congestion Games with Mixed Objectives}},
doi = {10.1007/978-3-319-48749-6_47},
year = {2016},
}
@misc{1082,
author = {Handirk, Tobias},
publisher = {Universität Paderborn},
title = {{Über die Rolle von Informationen in Verkehrsnetzwerken}},
year = {2016},
}
@article{139,
abstract = {We consider online optimization problems in which certain goods have to be acquired in order to provide a service or infrastructure. Classically, decisions for such problems are considered as final: one buys the goods. However, in many real world applications, there is a shift away from the idea of buying goods. Instead, leasing is often a more flexible and lucrative business model. Research has realized this shift and recently initiated the theoretical study of leasing models (Anthony and Gupta in Proceedings of the integer programming and combinatorial optimization: 12th International IPCO Conference, Ithaca, NY, USA, June 25–27, 2007; Meyerson in Proceedings of the 46th Annual IEEE Symposium on Foundations of Computer Science (FOCS 2005), 23–25 Oct 2005, Pittsburgh, PA, USA, 2005; Nagarajan and Williamson in Discret Optim 10(4):361–370, 2013) We extend this line of work and suggest a more systematic study of leasing aspects for a class of online optimization problems. We provide two major technical results. We introduce the leasing variant of online set multicover and give an O(log(mK)logn)-competitive algorithm (with n, m, and K being the number of elements, sets, and leases, respectively). Our results also imply improvements for the non-leasing variant of online set cover. Moreover, we extend results for the leasing variant of online facility location. Nagarajan and Williamson (Discret Optim 10(4):361–370, 2013) gave an O(Klogn)-competitive algorithm for this problem (with n and K being the number of clients and leases, respectively). We remove the dependency on n (and, thereby, on time). In general, this leads to a bound of O(lmaxloglmax) (with the maximal lease length lmax). For many natural problem instances, the bound improves to O(K2).},
author = {Abshoff, Sebastian and Kling, Peter and Markarian, Christine and Meyer auf der Heide, Friedhelm and Pietrzyk, Peter},
journal = {Journal of Combinatorial Optimization},
number = {4},
pages = {1197--1216},
publisher = {Springer},
title = {{Towards the price of leasing online}},
doi = {10.1007/s10878-015-9915-5},
year = {2016},
}
@inproceedings{177,
abstract = {Efficiently parallelizable parameterized problems have been classified as being either in the class FPP (fixed-parameter parallelizable) or the class PNC (parameterized analog of NC), which contains FPP as a subclass. In this paper, we propose a more restrictive class of parallelizable parameterized problems called fixed-parameter parallel-tractable (FPPT). For a problem to be in FPPT, it should possess an efficient parallel algorithm not only from a theoretical standpoint but in practice as well. The primary distinction between FPPT and FPP is the parallel processor utilization, which is bounded by a polynomial function in the case of FPPT. We initiate the study of FPPT with the well-known k-vertex cover problem. In particular, we present a parallel algorithm that outperforms the best known parallel algorithm for this problem: using O(m) instead of O(n2) parallel processors, the running time improves from 4logn+O(kk) to O(k⋅log3n), where m is the number of edges, n is the number of vertices of the input graph, and k is an upper bound of the size of the sought vertex cover. We also note that a few P-complete problems fall into FPPT including the monotone circuit value problem (MCV) when the underlying graphs are bounded by a constant Euler genus.},
author = {Abu-Khzam, Faisal N. and Li, Shouwei and Markarian, Christine and Meyer auf der Heide, Friedhelm and Podlipyan, Pavel},
booktitle = {Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {477--488},
title = {{On the Parameterized Parallel Complexity and the Vertex Cover Problem}},
doi = {10.1007/978-3-319-48749-6_35},
year = {2016},
}
@misc{5407,
author = {Koepe, Jörn},
title = {{Price-Based Allocation Games}},
year = {2016},
}
@misc{689,
author = {Schaefer, Johannes Sebastian},
publisher = {Universität Paderborn},
title = {{Routing Algorithms on Delayed Networks for Disaster Management Support}},
year = {2016},
}
@phdthesis{154,
author = {Cord-Landwehr, Andreas},
publisher = {Universität Paderborn},
title = {{Selfish Network Creation - On Variants of Network Creation Games}},
year = {2016},
}
@article{159,
abstract = {Max-min fairness (MMF) is a widely known approach to a fair allocation of bandwidth to each of the users in a network. This allocation can be computed by uniformly raising the bandwidths of all users without violating capacity constraints. We consider an extension of these allocations by raising the bandwidth with arbitrary and not necessarily uniform time-depending velocities (allocation rates). These allocations are used in a game-theoretic context for routing choices, which we formalize in progressive filling games (PFGs). We present a variety of results for equilibria in PFGs. We show that these games possess pure Nash and strong equilibria. While computation in general is NP-hard, there are polynomial-time algorithms for prominent classes of Max-Min-Fair Games (MMFG), including the case when all users have the same source-destination pair. We characterize prices of anarchy and stability for pure Nash and strong equilibria in PFGs and MMFGs when players have different or the same source-destination pairs. In addition, we show that when a designer can adjust allocation rates, it is possible to design games with optimal strong equilibria. Some initial results on polynomial-time algorithms in this direction are also derived.},
author = {Harks, Tobias and Höfer, Martin and Schewior, Kevin and Skopalik, Alexander},
journal = {IEEE/ACM Transactions on Networking},
number = {4},
pages = {2553--2562},
publisher = {IEEE},
title = {{Routing Games With Progressive Filling}},
doi = {10.1109/TNET.2015.2468571},
year = {2016},
}
@phdthesis{200,
author = {Drees, Maximilian},
publisher = {Universität Paderborn},
title = {{Existence and Properties of Pure Nash Equilibria in Budget Games}},
year = {2016},
}
@misc{251,
author = {Pfannschmidt, Karlson},
publisher = {Universität Paderborn},
title = {{Solving the aggregated bandits problem}},
year = {2015},
}
@phdthesis{270,
author = {Abshoff, Sebastian},
publisher = {Universität Paderborn},
title = {{On the Complexity of Fundamental Problems in Dynamic Ad-hoc Networks}},
year = {2015},
}
@inproceedings{275,
abstract = {We investigate a non-cooperative game-theoretic model for the formation of communication networks by selfish agents. Each agent aims for a central position at minimum cost for creating edges. In particular, the general model (Fabrikant et al., PODC'03) became popular for studying the structure of the Internet or social networks. Despite its significance, locality in this game was first studied only recently (Bilò et al., SPAA'14), where a worst case locality model was presented, which came with a high efficiency loss in terms of quality of equilibria. Our main contribution is a new and more optimistic view on locality: agents are limited in their knowledge and actions to their local view ranges, but can probe different strategies and finally choose the best. We study the influence of our locality notion on the hardness of computing best responses, convergence to equilibria, and quality of equilibria. Moreover, we compare the strength of local versus non-local strategy changes. Our results address the gap between the original model and the worst case locality variant. On the bright side, our efficiency results are in line with observations from the original model, yet we have a non-constant lower bound on the Price of Anarchy.},
author = {Cord-Landwehr, Andreas and Lenzner, Pascal},
booktitle = {Proceedings of the 40th International Symposium on Mathematical Foundations of Computer Science (MFCS)},
pages = {248--260},
title = {{Network Creation Games: Think Global - Act Local}},
doi = {10.1007/978-3-662-48054-0_21},
year = {2015},
}
@inproceedings{240,
abstract = {We consider online leasing problems in which demands arrive over time and need to be served by leasing resources. We introduce a new model for these problems such that a resource can be leased for K different durations each incurring a different cost (longer leases cost less per time unit). Each demand i can be served anytime between its arrival ai and its deadline ai+di by a leased resource. The objective is to meet all deadlines while minimizing the total leasing costs. This model is a natural generalization of Meyerson’s ParkingPermitProblem (FOCS 2005) in which di=0 for all i. We propose an online algorithm that is Θ(K+dmaxlmin)-competitive where dmax and lmin denote the largest di and the shortest available lease length, respectively. We also extend the SetCoverLeasing problem by deadlines and give a competitive online algorithm which also improves on existing solutions for the original SetCoverLeasing problem.},
author = {Li, Shouwei and Mäcker, Alexander and Markarian, Christine and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Proceedings of the 21st Annual International Computing and Combinatorics Conference (COCOON)},
pages = {277--288},
title = {{Towards Flexible Demands in Online Leasing Problems}},
doi = {10.1007/978-3-319-21398-9_22},
year = {2015},
}
@inproceedings{271,
abstract = {In \emph{bandwidth allocation games} (BAGs), the strategy of a player consists of various demands on different resources. The player's utility is at most the sum of these demands, provided they are fully satisfied. Every resource has a limited capacity and if it is exceeded by the total demand, it has to be split between the players. Since these games generally do not have pure Nash equilibria, we consider approximate pure Nash equilibria, in which no player can improve her utility by more than some fixed factor $\alpha$ through unilateral strategy changes. There is a threshold $\alpha_\delta$ (where $\delta$ is a parameter that limits the demand of each player on a specific resource) such that $\alpha$-approximate pure Nash equilibria always exist for $\alpha \geq \alpha_\delta$, but not for $\alpha < \alpha_\delta$. We give both upper and lower bounds on this threshold $\alpha_\delta$ and show that the corresponding decision problem is ${\sf NP}$-hard. We also show that the $\alpha$-approximate price of anarchy for BAGs is $\alpha+1$. For a restricted version of the game, where demands of players only differ slightly from each other (e.g. symmetric games), we show that approximate Nash equilibria can be reached (and thus also be computed) in polynomial time using the best-response dynamic. Finally, we show that a broader class of utility-maximization games (which includes BAGs) converges quickly towards states whose social welfare is close to the optimum.},
author = {Drees, Maximilian and Feldotto, Matthias and Riechers, Sören and Skopalik, Alexander},
booktitle = {Proceedings of the 8th International Symposium on Algorithmic Game Theory (SAGT)},
pages = {178--189},
title = {{On Existence and Properties of Approximate Pure Nash Equilibria in Bandwidth Allocation Games}},
doi = {10.1007/978-3-662-48433-3_14},
year = {2015},
}
@misc{277,
author = {Kothe, Nils},
publisher = {Universität Paderborn},
title = {{Multilevel Netzwerk Spiele mit konstanten Entfernungen im Highspeed-Netzwerk}},
year = {2015},
}
@misc{316,
author = {Pautz, Jannis},
publisher = {Universität Paderborn},
title = {{Budget Games with priced strategies}},
year = {2015},
}
@phdthesis{317,
author = {Jähn, Claudius},
publisher = {Universität Paderborn},
title = {{Bewertung von Renderingalgorithmen für komplexe 3-D-Szenen}},
year = {2015},
}
@inproceedings{266,
abstract = {Many markets have seen a shift from the idea of buying and moved to leasing instead. Arguably, the latter has been the major catalyst for their success. Ten years ago, research realized this shift and initiated the study of "online leasing problems" by introducing leasing to online optimization problems. Resources required to provide a service in an "online leasing problem" are no more bought but leased for different durations. In this paper, we provide an overview of results that contribute to the understanding of "online resource leasing problems". },
author = {Markarian, Christine and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 2015 ACM Symposium on Principles of Distributed Computing (PODC)},
pages = {343--344},
title = {{Online Resource Leasing}},
doi = {10.1145/2767386.2767454},
year = {2015},
}
@phdthesis{267,
author = {Markarian, Christine},
publisher = {Universität Paderborn},
title = {{Online Resource Leasing}},
year = {2015},
}
@inproceedings{274,
abstract = {Consider the problem in which n jobs that are classified into k types are to be scheduled on m identical machines without preemption. A machine requires a proper setup taking s time units before processing jobs of a given type. The objective is to minimize the makespan of the resulting schedule. We design and analyze an approximation algorithm that runs in time polynomial in n,m and k and computes a solution with an approximation factor that can be made arbitrarily close to 3/2.},
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Algorithms and Data Structures: 14th International Symposium, WADS 2015, Victoria, BC, Canada, August 5-7, 2015. Proceedings},
editor = {Dehne, Frank and Sack, Jörg-Rüdiger and Stege, Ulrike},
pages = {542--553},
title = {{Non-preemptive Scheduling on Machines with Setup Times}},
doi = {10.1007/978-3-319-21840-3_45},
year = {2015},
}
@article{320,
abstract = {We consider structural and algorithmic questions related to the Nash dynamics of weighted congestion games. In weighted congestion games with linear latency functions, the existence of pure Nash equilibria is guaranteed by a potential function argument. Unfortunately, this proof of existence is inefficient and computing pure Nash equilibria in such games is a PLS-hard problem even when all players have unit weights. The situation gets worse when superlinear (e.g., quadratic) latency functions come into play; in this case, the Nash dynamics of the game may contain cycles and pure Nash equilibria may not even exist. Given these obstacles, we consider approximate pure Nash equilibria as alternative solution concepts. A ρ--approximate pure Nash equilibrium is a state of a (weighted congestion) game from which no player has any incentive to deviate in order to improve her cost by a multiplicative factor higher than ρ. Do such equilibria exist for small values of ρ? And if so, can we compute them efficiently?We provide positive answers to both questions for weighted congestion games with polynomial latency functions by exploiting an “approximation” of such games by a new class of potential games that we call Ψ-games. This allows us to show that these games have d!-approximate pure Nash equilibria, where d is the maximum degree of the latency functions. Our main technical contribution is an efficient algorithm for computing O(1)-approximate pure Nash equilibria when d is a constant. For games with linear latency functions, the approximation guarantee is 3+√5/2 + Oγ for arbitrarily small γ > 0; for latency functions with maximum degree d≥ 2, it is d2d+o(d). The running time is polynomial in the number of bits in the representation of the game and 1/γ. 
As a byproduct of our techniques, we also show the following interesting structural statement for weighted congestion games with polynomial latency functions of maximum degree d ≥ 2: polynomially-long sequences of best-response moves from any initial state to a dO(d2)-approximate pure Nash equilibrium exist and can be efficiently identified in such games as long as d is a constant.To the best of our knowledge, these are the first positive algorithmic results for approximate pure Nash equilibria in weighted congestion games. Our techniques significantly extend our recent work on unweighted congestion games through the use of Ψ-games. The concept of approximating nonpotential games by potential ones is interesting in itself and might have further applications.},
author = {Caragiannis, Ioannis and Fanelli, Angelo and Gravin, Nick and Skopalik, Alexander},
journal = {Transactions on Economics and Computation},
number = {1},
publisher = {ACM},
title = {{Approximate Pure Nash Equilibria in Weighted Congestion Games: Existence, Efficient Computation, and Structure}},
doi = {10.1145/2614687},
volume = {3},
year = {2015},
}
@inproceedings{453,
abstract = {In this paper we study the potential function in congestion games. We consider both games with non-decreasing cost functions as well as games with non-increasing utility functions. We show that the value of the potential function $\Phi(\sf s)$ of any outcome $\sf s$ of a congestion game approximates the optimum potential value $\Phi(\sf s^*)$ by a factor $\Psi_{\mathcal{F}}$ which only depends on the set of cost/utility functions $\mathcal{F}$, and an additive term which is bounded by the sum of the total possible improvements of the players in the outcome $\sf s$. The significance of this result is twofold. On the one hand it provides \emph{Price-of-Anarchy}-like results with respect to the potential function. On the other hand, we show that these approximations can be used to compute $(1+\varepsilon)\cdot\Psi_{\mathcal{F}}$-approximate pure Nash equilibria for congestion games with non-decreasing cost functions. For the special case of polynomial cost functions, this significantly improves the guarantees from Caragiannis et al. [FOCS 2011]. Moreover, our machinery provides the first guarantees for general latency functions.},
author = {Feldotto, Matthias and Gairing, Martin and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {30--43},
title = {{Bounding the Potential Function in Congestion Games and Approximate Pure Nash Equilibria}},
doi = {10.1007/978-3-319-13129-0_3},
year = {2014},
}
@inproceedings{395,
abstract = {We consider a multilevel network game, where nodes can improve their communication costs by connecting to a high-speed network. The n nodes are connected by a static network and each node can decide individually to become a gateway to the high-speed network. The goal of a node v is to minimize its private costs, i.e., the sum (SUM-game) or maximum (MAX-game) of communication distances from v to all other nodes plus a fixed price α > 0 if it decides to be a gateway. Between gateways the communication distance is 0, and gateways also improve other nodes' distances by behaving as shortcuts. For the SUM-game, we show that for α ≤ n − 1, the price of anarchy is Θ(n/√α) and in this range equilibria always exist. In range α ∈ (n−1, n(n−1)) the price of anarchy is Θ(√α), and for α ≥ n(n − 1) it is constant. For the MAX-game, we show that the price of anarchy is either Θ(1 + n/√α), for α ≥ 1, or else 1. Given a graph with girth of at least 4α, equilibria always exist. Concerning the dynamics, both games are not potential games. For the SUM-game, we even show that it is not weakly acyclic.},
author = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Jung, Daniel and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {435--440},
title = {{Multilevel Network Games}},
doi = {10.1007/978-3-319-13129-0_36},
year = {2014},
}
@inproceedings{459,
abstract = {In this survey article, we discuss two algorithmic research areas that emerge from problems that arise when resources are offered in the cloud. The first area, online leasing, captures problems arising from the fact that resources in the cloud are not bought, but leased by cloud vendors. The second area, Distributed Storage Systems, deals with problems arising from so-called cloud federations, i.e., when several cloud providers are needed to fulfill a given task.},
author = {Kniesburges, Sebastian and Markarian, Christine and Meyer auf der Heide, Friedhelm and Scheideler, Christian},
booktitle = {Proceedings of the 21st International Colloquium on Structural Information and Communication Complexity (SIROCCO)},
pages = {1--13},
title = {{Algorithmic Aspects of Resource Management in the Cloud}},
doi = {10.1007/978-3-319-09620-9_1},
year = {2014},
}
@inproceedings{435,
abstract = {We give a polynomial time algorithm to compute an optimal energy and fractional weighted flow trade-off schedule for a speed-scalable processor with discrete speeds.Our algorithm uses a geometric approach that is based on structural properties obtained from a primal-dual formulation of the problem.},
author = {Antoniadis, Antonios and Barcelo, Neal and Consuegra, Mario and Kling, Peter and Nugent, Michael and Pruhs, Kirk and Scquizzato, Michele},
booktitle = {Proceedings of the 31st Symposium on Theoretical Aspects of Computer Science (STACS)},
pages = {63--74},
title = {{Efficient Computation of Optimal Energy and Fractional Weighted Flow Trade-off Schedules}},
doi = {10.4230/LIPIcs.STACS.2014.63},
year = {2014},
}
@misc{373,
author = {Pahl, David},
publisher = {Universität Paderborn},
title = {{Reputationssysteme für zusammengesetzte Dienstleistungen}},
year = {2014},
}
@inproceedings{380,
abstract = {Network creation games model the creation and usage costs of networks formed by n selfish nodes. Each node v can buy a set of edges, each for a fixed price α > 0. Its goal is to minimize its private costs, i.e., the sum (SUM-game, Fabrikant et al., PODC 2003) or maximum (MAX-game, Demaine et al., PODC 2007) of distances from v to all other nodes plus the prices of the bought edges. The above papers show the existence of Nash equilibria as well as upper and lower bounds for the prices of anarchy and stability. In several subsequent papers, these bounds were improved for a wide range of prices α. In this paper, we extend these models by incorporating quality-of-service aspects: Each edge cannot only be bought at a fixed quality (edge length one) for a fixed price α. Instead, we assume that quality levels (i.e., edge lengths) are varying in a fixed interval $[\check{\beta}, \hat{\beta}]$.},
series = {LNCS},
author = {Cord-Landwehr, Andreas and Mäcker, Alexander and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {423--428},
title = {{Quality of Service in Network Creation Games}},
doi = {10.1007/978-3-319-13129-0_34},
year = {2014},
}
@inproceedings{412,
abstract = {In this paper we present and analyze HSkip+, a self-stabilizing overlay network for nodes with arbitrary heterogeneous bandwidths. HSkip+ has the same topology as the Skip+ graph proposed by Jacob et al. [PODC 2009] but its self-stabilization mechanism significantly outperforms the self-stabilization mechanism proposed for Skip+. Also, the nodes are now ordered according to their bandwidths and not according to their identifiers. Various other solutions have already been proposed for overlay networks with heterogeneous bandwidths, but they are not self-stabilizing. In addition to HSkip+ being self-stabilizing, its performance is on par with the best previous bounds on the time and work for joining or leaving a network of peers of logarithmic diameter and degree and arbitrary bandwidths. Also, the dilation and congestion for routing messages is on par with the best previous bounds for such networks, so that HSkip+ combines the advantages of both worlds. Our theoretical investigations are backed by simulations demonstrating that HSkip+ is indeed performing much better than Skip+ and working correctly under high churn rates.},
author = {Feldotto, Matthias and Scheideler, Christian and Graffi, Kalman},
booktitle = {Proceedings of the 14th IEEE International Conference on Peer-to-Peer Computing (P2P)},
pages = {1--10},
title = {{HSkip+: A Self-Stabilizing Overlay Network for Nodes with Heterogeneous Bandwidths}},
doi = {10.1109/P2P.2014.6934300},
year = {2014},
}
@phdthesis{431,
abstract = {In meiner Dissertation besch{\"a}ftige ich mich mit dem Entwurf und der Analyse energieeffizienter Schedulingalgorithmen, insbesondere f{\"u}r sogenannte Speed-Scaling Modelle. Diese stellen das theoretische Pendant von Techniken wie AMDs PowerNOW! und Intels SpeedStep dar, welche es erlauben die Geschwindigkeit von Prozessoren zur Laufzeit an die derzeitigen Bedingungen anzupassen. Theoretische Untersuchungen solcher Modelle sind auf eine Arbeit von Yao, Demers und Shenker (FOCS'95) zur{\"u}ckzuf{\"u}hren. Hier kombinieren die Autoren klassisches Deadline-Scheduling mit einem Prozessor der Speed-Scaling beherrscht. Es gilt Jobs verschiedener Gr{\"o}ße fristgerecht abzuarbeiten und die dabei verwendete Energie zu minimieren. Der Energieverbrauch des Prozessors wird durch eine konvexe Funktion $\POW\colon\R_{\geq0}\to\R_{\geq0}$ modelliert, welche die Geschwindigkeit auf den Energieverbrauch abbildet.Meine Dissertation betrachtet verschiedene Varianten des urspr{\"u}nglichen Speed-Scaling Modells. Forschungsrelevante Ergebnisse sind in den Kapiteln 3 bis 6 zu finden und erstrecken sich {\"u}ber die im Folgenden beschriebenen Aspekte:- Kapitel 3 und 4 betrachten verschiedene \emph{Price-Collecting} Varianten des Originalproblems. Hier d{\"u}rfen einzelne Deadlines verfehlt werden, sofern eine jobabh{\"a}ngige Strafe gezahlt wird. Ich entwerfe insbesondere Online-Algorithmen mit einer beweisbar guten Competitiveness. Dabei liefern meine Ergebnisse substantielle Verbesserungen bestehender Arbeiten und erweitern diese unter Anderem auf Szenarien mit mehreren Prozessoren.- In Kapitel 5 wird statt des klassischen Deadline-Schedulings eine Linearkombination der durchschnittlichen Antwortzeit und des Energieverbrauchs betrachtet. Die Frage, ob dieses Problem NP-schwer ist, stellt eine der zentralen Forschungsfragen in diesem Gebiet dar. 
F{\"u}r eine relaxierte Form dieser Frage entwerfe ich einen effizienten Algorithmus und beweise seine Optimalit{\"a}t.- Das letzte Kapitel betrachtet ein Modell, welches – auf den ersten Blick – nicht direkt zur Speed-Scaling Literatur z{\"a}hlt. Hier geht es stattdessen um ein allgemeines Resource-Constrained Scheduling, in dem sich die Prozessoren zusammen eine gemeinsame, beliebig aufteilbare Ressource teilen. Ich untersuche die Komplexit{\"a}t des Problems und entwerfe verschiedene Approximationsalgorithmen.},
author = {Kling, Peter},
publisher = {Universität Paderborn},
title = {{Energy-efficient Scheduling Algorithms}},
year = {2014},
}
@inproceedings{462,
abstract = {We discuss a technique to analyze complex infinitely repeated games using techniques from the fields of game theory and simulations. Our research is motivated by the analysis of electronic markets with thousands of participants and possibly complex strategic behavior. We consider an example of a global market of composed IT services to demonstrate the use of our simulation technique. We present our current work in this area and we want to discuss further approaches for the future.},
author = {Feldotto, Matthias and Skopalik, Alexander},
booktitle = {Proceedings of the 4th International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH 2014)},
pages = {625--630},
title = {{A Simulation Framework for Analyzing Complex Infinitely Repeated Games}},
doi = {10.5220/0005110406250630},
year = {2014},
}
@inproceedings{455,
abstract = {We study the existence of approximate pure Nash equilibria in weighted congestion games and develop techniques to obtain approximate potential functions that prove the existence of alpha-approximate pure Nash equilibria and the convergence of alpha-improvement steps. Specifically, we show how to obtain upper bounds for approximation factor alpha for a given class of cost functions. For example for concave cost functions the factor is at most 3/2, for quadratic cost functions it is at most 4/3, and for polynomial cost functions of maximal degree d it is at most d + 1. For games with two players we obtain tight bounds which are as small as for example 1.054 in the case of quadratic cost functions.},
author = {Hansknecht, Christoph and Klimm, Max and Skopalik, Alexander},
booktitle = {Proceedings of the 17th International Workshop on Approximation Algorithms for Combinatorial Optimization Problems (APPROX)},
pages = {242--257},
title = {{Approximate pure Nash equilibria in weighted congestion games}},
doi = {10.4230/LIPIcs.APPROX-RANDOM.2014.242},
year = {2014},
}
@inproceedings{379,
abstract = {In the leasing variant of Set Cover presented by Anthony et al. [1], elements U arrive over time and must be covered by sets from a family F of subsets of U. Each set can be leased for K different periods of time. Let |U| = n and |F| = m. Leasing a set S for a period k incurs a cost c_S^k and allows S to cover its elements for the next l_k time steps. The objective is to minimize the total cost of the sets leased, such that elements arriving at any time t are covered by sets which contain them and are leased during time t. Anthony et al. [1] gave an optimal O(log n)-approximation for the problem in the offline setting, unless P = NP [22]. In this paper, we give randomized algorithms for variants of Set Cover Leasing in the online setting, including a generalization of Online Set Cover with Repetitions presented by Alon et al. [2], where elements appear multiple times and must be covered by a different set at each arrival. Our results improve the O(log^2(mn)) competitive factor of Online Set Cover with Repetitions [2] to O(log d log(dn)) = O(log m log(mn)), where d is the maximum number of sets an element belongs to.},
author = {Abshoff, Sebastian and Markarian, Christine and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 8th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {25--34},
title = {{Randomized Online Algorithms for Set Cover Leasing Problems}},
doi = {10.1007/978-3-319-12691-3_3},
year = {2014},
}
@inproceedings{451,
abstract = {We introduce the concept of budget games. Players choose a set of tasks and each task has a certain demand on every resource in the game. Each resource has a budget. If the budget is not enough to satisfy the sum of all demands, it has to be shared between the tasks. We study strategic budget games, where the budget is shared proportionally. We also consider a variant in which the order of the strategic decisions influences the distribution of the budgets. The complexity of the optimal solution as well as existence, complexity and quality of equilibria are analysed. Finally, we show that the time an ordered budget game needs to converge towards an equilibrium may be exponential.},
author = {Drees, Maximilian and Riechers, Sören and Skopalik, Alexander},
booktitle = {Proceedings of the 7th International Symposium on Algorithmic Game Theory (SAGT)},
editor = {Lavi, Ron},
pages = {110--121},
title = {{Budget-restricted utility games with ordered strategic decisions}},
doi = {10.1007/978-3-662-44803-8_10},
year = {2014},
}
@inproceedings{456,
abstract = {We study the existence of approximate pure Nash equilibria in social context congestion games. For any given set of allowed cost functions F, we provide a threshold value μ(F), and show that for the class of social context congestion games with cost functions from F, α-Nash dynamics are guaranteed to converge to α-approximate pure Nash equilibrium if and only if α > μ(F). Interestingly, μ(F) is related and always upper bounded by Roughgarden’s anarchy value [19].},
author = {Gairing, Martin and Kotsialou, Grammateia and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {480--485},
title = {{Approximate pure Nash equilibria in Social Context Congestion Games}},
doi = {10.1007/978-3-319-13129-0_43},
year = {2014},
}
@inproceedings{368,
abstract = {We consider the problem of scheduling a number of jobs on $m$ identical processors sharing a continuously divisible resource. Each job j comes with a resource requirement r_j \in [0,1]. The job can be processed at full speed if granted its full resource requirement. If receiving only an x-portion of r_j, it is processed at an x-fraction of the full speed. Our goal is to find a resource assignment that minimizes the makespan (i.e., the latest completion time). Variants of such problems, relating the resource assignment of jobs to their \emph{processing speeds}, have been studied under the term discrete-continuous scheduling. Known results are either very pessimistic or heuristic in nature. In this paper, we suggest and analyze a slightly simplified model. It focuses on the assignment of shared continuous resources to the processors. The job assignment to processors and the ordering of the jobs have already been fixed. It is shown that, even for unit size jobs, finding an optimal solution is NP-hard if the number of processors is part of the input. Positive results for unit size jobs include an efficient optimal algorithm for 2 processors. Moreover, we prove that balanced schedules yield a 2-1/m-approximation for a fixed number of processors. Such schedules are computed by our GreedyBalance algorithm, for which the bound is tight.},
author = {Brinkmann, Andre and Kling, Peter and Meyer auf der Heide, Friedhelm and Nagel, Lars and Riechers, Sören and Süss, Tim},
booktitle = {Proceedings of the 26th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {128--137},
title = {{Scheduling Shared Continuous Resources on Many-Cores}},
doi = {10.1145/2612669.2612698},
year = {2014},
}
@inproceedings{452,
abstract = {Today's networks, like the Internet, do not consist of one but a mixture of several interconnected networks. Each has individual qualities and hence the performance of a network node results from the networks' interplay. We introduce a new game theoretic model capturing the interplay between a high-speed backbone network and a low-speed general purpose network. In our model, n nodes are connected by a static network and each node can decide individually to become a gateway node. A gateway node pays a fixed price for its connection to the high-speed network, but can utilize the high-speed network to gain communication distance 0 to all other gateways. Communication distances in the low-speed network are given by the hop distances. The effective communication distance between any two nodes then is given by the shortest path, which is possibly improved by using gateways as shortcuts. Every node v has the objective to minimize its communication costs, given by the sum (SUM-game) or maximum (MAX-game) of the effective communication distances from v to all other nodes plus a fixed price \alpha > 0, if it decides to be a gateway. For both games and different ranges of \alpha, we study the existence of equilibria, the price of anarchy, and convergence properties of best-response dynamics.},
author = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Jung, Daniel and Skopalik, Alexander},
booktitle = {Proceedings of the 7th International Symposium on Algorithmic Game Theory (SAGT)},
editor = {Lavi, Ron},
pages = {294},
title = {{Brief Announcement: A Model for Multilevel Network Games}},
year = {2014},
}
@inproceedings{370,
abstract = {Max-min fairness (MMF) is a widely known approach to a fair allocation of bandwidth to each of the users in a network. This allocation can be computed by uniformly raising the bandwidths of all users without violating capacity constraints. We consider an extension of these allocations by raising the bandwidth with arbitrary and not necessarily uniform time-depending velocities (allocation rates). These allocations are used in a game-theoretic context for routing choices, which we formalize in progressive filling games (PFGs).We present a variety of results for equilibria in PFGs. We show that these games possess pure Nash and strong equilibria. While computation in general is NP-hard, there are polynomial-time algorithms for prominent classes of Max-Min-Fair Games (MMFG), including the case when all users have the same source-destination pair. We characterize prices of anarchy and stability for pure Nash and strong equilibria in PFGs and MMFGs when players have different or the same source-destination pairs. In addition, we show that when a designer can adjust allocation rates, it is possible to design games with optimal strong equilibria. Some initial results on polynomial-time algorithms in this direction are also derived. },
author = {Harks, Tobias and Höfer, Martin and Schewior, Kevin and Skopalik, Alexander},
booktitle = {Proceedings of the 33rd Annual IEEE International Conference on Computer Communications (INFOCOM'14)},
pages = {352--360},
title = {{Routing Games with Progressive Filling}},
doi = {10.1109/TNET.2015.2468571},
year = {2014},
}
@inproceedings{477,
abstract = {We consider the k-token dissemination problem, where k initially arbitrarily distributed tokens have to be disseminated to all nodes in a dynamic network (as introduced by Kuhn et al., STOC 2010). In contrast to general dynamic networks, our dynamic networks are unit disk graphs, i.e., nodes are embedded into the Euclidean plane and two nodes are connected if and only if their distance is at most R. Our worst-case adversary is allowed to move the nodes on the plane, but the maximum velocity v_max of each node is limited and the graph must be connected in each round. For this model, we provide almost tight lower and upper bounds for k-token dissemination if nodes are restricted to send only one token per round. It turns out that the maximum velocity v_max is a meaningful parameter to characterize dynamics in our model.},
author = {Abshoff, Sebastian and Benter, Markus and Cord-Landwehr, Andreas and Malatyali, Manuel and Meyer auf der Heide, Friedhelm},
booktitle = {Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics, {ALGOSENSORS} 2013, Sophia Antipolis, France, September 5-6, 2013, Revised Selected Papers},
pages = {22--34},
title = {{Token Dissemination in Geometric Dynamic Networks}},
doi = {10.1007/978-3-642-45346-5_3},
year = {2013},
}
@inproceedings{505,
abstract = {In this paper we introduce “On-The-Fly Computing”, our vision of future IT services that will be provided by assembling modular software components available on world-wide markets. After suitable components have been found, they are automatically integrated, configured and brought to execution in an On-The-Fly Compute Center. We envision that these future compute centers will continue to leverage three current trends in large scale computing which are an increasing amount of parallel processing, a trend to use heterogeneous computing resources, and—in the light of rising energy cost—energy-efficiency as a primary goal in the design and operation of computing systems. In this paper, we point out three research challenges and our current work in these areas.},
author = {Happe, Markus and Kling, Peter and Plessl, Christian and Platzner, Marco and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 9th IEEE Workshop on Software Technology for Future embedded and Ubiquitous Systems (SEUS)},
publisher = {IEEE},
title = {{On-The-Fly Computing: A Novel Paradigm for Individualized IT Services}},
doi = {10.1109/ISORC.2013.6913232},
year = {2013},
}
@unpublished{524,
abstract = {We study the complexity theory for the local distributed setting introduced by Korman, Peleg and Fraigniaud. They have defined three complexity classes LD (Local Decision), NLD (Nondeterministic Local Decision) and NLD^#n. The class LD consists of all languages which can be decided with a constant number of communication rounds. The class NLD consists of all languages which can be verified by a nondeterministic algorithm with a constant number of communication rounds. In order to define the nondeterministic classes, they have transferred the notation of nondeterminism into the distributed setting by the use of certificates and verifiers. The class NLD^#n consists of all languages which can be verified by a nondeterministic algorithm where each node has access to an oracle for the number of nodes. They have shown the hierarchy LD subset NLD subset NLD^#n. Our main contributions are strict hierarchies within the classes defined by Korman, Peleg and Fraigniaud. We define additional complexity classes: the class LD(t) consists of all languages which can be decided with at most t communication rounds. The class NLD-O(f) consists of all languages which can be verified by a local verifier such that the size of the certificates that are needed to verify the language are bounded by a function from O(f). Our main results are refined strict hierarchies within these nondeterministic classes.},
author = {Meyer auf der Heide, Friedhelm and Swirkot, Kamil},
publisher = {arXiv},
title = {{Hierarchies in Local Distributed Decision}},
year = {2013},
}
@inproceedings{562,
abstract = {In Distributed Cloud Computing, applications are deployed across many data centres at topologically diverse locations to improve network-related quality of service (QoS). As we focus on interactive applications, we minimize the latency between users and an application by allocating Cloud resources nearby the customers. Allocating resources at all locations will result in the best latency but also in the highest expenses. So we need to find an optimal subset of locations which reduces the latency but also the expenses – the facility location problem (FLP). In addition, we consider resource capacity restrictions, as a resource can only serve a limited amount of users. An FLP can be globally solved. Additionally, we propose a local, distributed heuristic. This heuristic is running within the network and does not depend on a global component. No distributed, local approximations for the capacitated FLP have been proposed so far due to the complexity of the problem. We compared the heuristic with an optimal solution obtained from a mixed integer program for different network topologies. We investigated the influence of different parameters like overall resource utilization or different latency weights.},
author = {Keller, Matthias and Pawlik, Stefan and Pietrzyk, Peter and Karl, Holger},
booktitle = {Proceedings of the 6th International Conference on Utility and Cloud Computing (UCC) workshop on Distributed cloud computing},
pages = {429--434},
title = {{A Local Heuristic for Latency-Optimized Distributed Cloud Deployment}},
doi = {10.1109/UCC.2013.85},
year = {2013},
}
@inproceedings{563,
abstract = {Dominating set based virtual backbones are used for routing in wireless ad-hoc networks. Such backbones receive and transmit messages from/to every node in the network. Existing distributed algorithms only consider undirected graphs, which model symmetric networks with uniform transmission ranges. We are particularly interested in the well-established disk graphs, which model asymmetric networks with non-uniform transmission ranges. The corresponding graph theoretic problem seeks a strongly connected dominating-absorbent set of minimum cardinality in a digraph. A subset of nodes in a digraph is a strongly connected dominating-absorbent set if the subgraph induced by these nodes is strongly connected and each node in the graph is either in the set or has both an in-neighbor and an out-neighbor in it. We introduce the first distributed algorithm for this problem in disk graphs. The algorithm gives an O(k^4)-approximation ratio and has a runtime bound of O(Diam) where Diam is the diameter of the graph and k denotes the transmission ratio r_{max}/r_{min} with r_{max} and r_{min} being the maximum and minimum transmission range, respectively. Moreover, we apply our algorithm on the subgraph of disk graphs consisting of only bidirectional edges. Our algorithm gives an O(ln k)-approximation and a runtime bound of O(k^8 log^∗ n), which, for bounded k, is an optimal approximation for the problem, following Lenzen and Wattenhofer’s Ω(log^∗ n) runtime lower bound for distributed constant approximation in disk graphs.},
author = {Markarian, Christine and Meyer auf der Heide, Friedhelm and Schubert, Michael},
booktitle = {Proceedings of the 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics (ALGOSENSORS)},
pages = {217--227},
title = {{A Distributed Approximation Algorithm for Strongly Connected Dominating-Absorbent Sets in Asymmetric Wireless Ad-Hoc Networks}},
doi = {10.1007/978-3-642-45346-5_16},
year = {2013},
}
@inproceedings{507,
abstract = {We study two-party communication in the context of directed dynamic networks that are controlled by an adaptive adversary. This adversary is able to change all edges as long as the networks stay strongly-connected in each round. In this work, we establish a relation between counting the total number of nodes in the network and the problem of exchanging tokens between two communication partners which communicate through a dynamic network. We show that the communication problem for a constant fraction of n tokens in a dynamic network with n nodes is at most as hard as counting the number of nodes in a dynamic network with at most 4n+3 nodes. For the proof, we construct a family of directed dynamic networks and apply a lower bound from two-party communication complexity.},
author = {Abshoff, Sebastian and Benter, Markus and Malatyali, Manuel and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 17th International Conference on Principles of Distributed Systems (OPODIS)},
pages = {11--22},
title = {{On Two-Party Communication Through Dynamic Networks}},
doi = {10.1007/978-3-319-03850-6_2},
year = {2013},
}
@phdthesis{514,
abstract = {Diese Arbeit besch{\"a}ftigt sich mit dem Facility Location Problem. Dies ist ein Optimierungsproblem, bei dem festgelegt werden muss an welchen Positionen Ressourcen zur Verf{\"u}gung gestellt werden, so dass diese von Nutzern gut erreicht werden k{\"o}nnen. Es sollen dabei Kosten minimiert werden, die zum einen durch Bereitstellung von Ressourcen und zum anderen durch Verbindungskosten zwischen Nutzern und Ressourcen entstehen. Die Schwierigkeit des Problems liegt darin, dass man einerseits m{\"o}glichst wenige Ressourcen zur Verf{\"u}gung stellen m{\"o}chte, andererseits daf{\"u}r sorgen muss, dass sich Nutzer nicht all zu weit weg von Ressourcen befinden. Dies w{\"u}rde n{\"a}mlich hohe Verbindungskosten nach sich ziehen. Das Facility Location Problem wurde bereits sehr intensiv in vielen unterschiedlichen Varianten untersucht. In dieser Arbeit werden drei Varianten des Problems modelliert und neue Algorithmen f{\"u}r sie entwickelt und bez{\"u}glich ihres Approximationsfaktors und ihrer Laufzeit analysiert. Jede dieser drei untersuchten Varianten hat einen besonderen Schwerpunkt. Bei der ersten Varianten handelt es sich um ein Online Problem, da hier die Eingabe nicht von Anfang an bekannt ist, sondern Schritt f{\"u}r Schritt enth{\"u}llt wird. Die Schwierigkeit hierbei besteht darin unwiderrufliche Entscheidungen treffen zu m{\"u}ssen ohne dabei die Zukunft zu kennen und trotzdem eine zu jeder Zeit gute L{\"o}sung angeben zu k{\"o}nnen. Der Schwerpunkt der zweiten Variante liegt auf Lokalit{\"a}t, die z.B. in Sensornetzwerken von großer Bedeutung ist. Hier soll eine L{\"o}sung verteilt und nur mit Hilfe von lokalen Information berechnet werden. Schließlich besch{\"a}ftigt sich die dritte Variante mit einer verteilten Berechnung, bei welcher nur eine stark beschr{\"a}nkte Datenmenge verschickt werden darf und dabei trotzdem ein sehr guter Approximationsfaktor erreicht werden muss. Die bei der Analyse der Approximationsfaktoren bzw. 
der Kompetitivit{\"a}t verwendeten Techniken basieren zum großen Teil auf Absch{\"a}tzung der primalen L{\"o}sung mit Hilfe einer L{\"o}sung des zugeh{\"o}rigen dualen Problems. F{\"u}r die Modellierung von Lokalit{\"a}t wird das weitverbreitete LOCAL Modell verwendet. In diesem Modell werden f{\"u}r die Algorithmen subpolynomielle obere Laufzeitschranken gezeigt.},
author = {Pietrzyk, Peter},
publisher = {Universität Paderborn},
title = {{Local and Online Algorithms for Facility Location}},
year = {2013},
}
@inproceedings{499,
abstract = {We present a new online algorithm for profit-oriented scheduling on multiple speed-scalable processors. Moreover, we provide a tight analysis of the algorithm's competitiveness. Our results generalize and improve upon work by \citet{Chan:2010}, which considers a single speed-scalable processor. Using significantly different techniques, we can not only extend their model to multiprocessors but also prove an enhanced and tight competitive ratio for our algorithm. In our scheduling problem, jobs arrive over time and are preemptable. They have different workloads, values, and deadlines. The scheduler may decide not to finish a job but instead to suffer a loss equaling the job's value. However, to process a job's workload until its deadline the scheduler must invest a certain amount of energy. The cost of a schedule is the sum of lost values and invested energy. In order to finish a job the scheduler has to determine which processors to use and set their speeds accordingly. A processor's energy consumption is power $\Power{s}$ integrated over time, where $\Power{s}=s^{\alpha}$ is the power consumption when running at speed $s$. Since we consider the online variant of the problem, the scheduler has no knowledge about future jobs. This problem was introduced by~\citet{Chan:2010} for the case of a single processor. They presented an online algorithm which is $\alpha^{\alpha}+2e\alpha$-competitive. We provide an online algorithm for the case of multiple processors with an improved competitive ratio of $\alpha^{\alpha}$.},
author = {Kling, Peter and Pietrzyk, Peter},
booktitle = {Proceedings of the 25th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {251--260},
title = {{Profitable Scheduling on Multiple Speed-Scalable Processors}},
doi = {10.1145/2486159.2486183},
year = {2013},
}
@proceedings{558,
editor = {Flocchini, Paola and Gao, Jie and Kranakis, Evangelos and Meyer auf der Heide, Friedhelm},
location = {Sophia Antipolis, France},
publisher = {Springer},
title = {{Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics}},
doi = {10.1007/978-3-642-45346-5},
volume = {8243},
year = {2013},
}
@inproceedings{636,
abstract = {We consider an online facility location problem where clients arrive over time and their demands have to be served by opening facilities and assigning the clients to opened facilities. When opening a facility we must choose one of K different lease types to use. A lease type k has a certain lease length l_k. Opening a facility i using lease type k causes a cost of f_i^k and ensures that i is open for the next l_k time steps. In addition to costs for opening facilities, we have to take connection costs c_{ij} into account when assigning a client j to facility i. We develop and analyze the first online algorithm for this problem that has a time-independent competitive factor. This variant of the online facility location problem was introduced by Nagarajan and Williamson [7] and is strongly related to both the online facility problem by Meyerson [5] and the parking permit problem by Meyerson [6]. Nagarajan and Williamson gave a 3-approximation algorithm for the offline problem and an O(K log n)-competitive algorithm for the online variant. Here, n denotes the total number of clients arriving over time. We extend their result by removing the dependency on n (and thereby on the time). In general, our algorithm is O(l_max log(l_max))-competitive. Here l_max denotes the maximum lease length. Moreover, we prove that it is O(log^2(l_max))-competitive for many “natural” cases. Such cases include, for example, situations where the number of clients arriving in each time step does not vary too much, or is non-increasing, or is polynomially bounded in l_max.},
author = {Meyer auf der Heide, Friedhelm and Pietrzyk, Peter and Kling, Peter},
booktitle = {Proceedings of the 19th International Colloquium on Structural Information & Communication Complexity (SIROCCO)},
pages = {61--72},
title = {{An Algorithm for Facility Leasing}},
doi = {10.1007/978-3-642-31104-8_6},
year = {2012},
}
@inproceedings{580,
abstract = {We present and study a new model for energy-aware and profit-oriented scheduling on a single processor. The processor features dynamic speed scaling as well as suspension to a sleep mode. Jobs arrive over time, are preemptable, and have different sizes, values, and deadlines. On the arrival of a new job, the scheduler may either accept or reject the job. Accepted jobs need a certain energy investment to be finished in time, while rejected jobs cause costs equal to their values. Here, power consumption at speed $s$ is given by $P(s)=s^{\alpha}+\beta$ and the energy investment is power integrated over time. Additionally, the scheduler may decide to suspend the processor to a sleep mode in which no energy is consumed, though awaking entails fixed transition costs $\gamma$. The objective is to minimize the total value of rejected jobs plus the total energy. Our model combines aspects from advanced energy conservation techniques (namely speed scaling and sleep states) and profit-oriented scheduling models. We show that \emph{rejection-oblivious} schedulers (whose rejection decisions are not based on former decisions) have – in contrast to the model without sleep states – an unbounded competitive ratio. It turns out that the jobs' value densities (the ratio between a job's value and its work) are crucial for the performance of such schedulers. We give an algorithm whose competitiveness nearly matches the lower bound w.r.t. the maximum value density. If the maximum value density is not too large, the competitiveness becomes $\alpha^{\alpha}+2e\alpha$. Also, we show that it suffices to restrict the value density of low-value jobs only. Using a technique from \cite{Chan:2010} we transfer our results to processors with a fixed maximum speed.},
author = {Cord-Landwehr, Andreas and Kling, Peter and Mallmann-Trenn, Frederik},
booktitle = {Proceedings of the 1st Mediterranean Conference on Algorithms (MedAlg)},
editor = {Even, Guy and Rawitz, Dror},
pages = {218--231},
title = {{Slow Down & Sleep for Profit in Online Deadline Scheduling}},
doi = {10.1007/978-3-642-34862-4_17},
year = {2012},
}
@article{579,
abstract = {A left-to-right maximum in a sequence of n numbers s_1, …, s_n is a number that is strictly larger than all preceding numbers. In this article we present a smoothed analysis of the number of left-to-right maxima in the presence of additive random noise. We show that for every sequence of n numbers s_i ∈ [0,1] that are perturbed by uniform noise from the interval [-ε,ε], the expected number of left-to-right maxima is Θ(√(n/ε) + log n) for ε>1/n. For Gaussian noise with standard deviation σ we obtain a bound of O((log^{3/2} n)/σ + log n). We apply our results to the analysis of the smoothed height of binary search trees and the smoothed number of comparisons in the quicksort algorithm and prove bounds of Θ(√(n/ε) + log n) and Θ((n/(ε+1))·√(n/ε) + n log n), respectively, for uniform random noise from the interval [-ε,ε]. Our results can also be applied to bound the smoothed number of points on a convex hull of points in the two-dimensional plane and to smoothed motion complexity, a concept we describe in this article. We bound how often one needs to update a data structure storing the smallest axis-aligned box enclosing a set of points moving in d-dimensional space.},
author = {Damerow, Valentina and Manthey, Bodo and Meyer auf der Heide, Friedhelm and Räcke, Harald and Scheideler, Christian and Sohler, Christian and Tantau, Till},
journal = {ACM Transactions on Algorithms},
number = {3},
pages = {30},
publisher = {ACM},
title = {{Smoothed analysis of left-to-right maxima with applications}},
doi = {10.1145/2229163.2229174},
volume = {8},
year = {2012},
}
@phdthesis{601,
abstract = {Wir betrachten eine Gruppe von mobilen, autonomen Robotern in einem ebenen Gel{\"a}nde. Es gibt keine zentrale Steuerung und die Roboter m{\"u}ssen sich selbst koordinieren. Zentrale Herausforderung dabei ist, dass jeder Roboter nur seine unmittelbare Nachbarschaft sieht und auch nur mit Robotern in seiner unmittelbaren Nachbarschaft kommunizieren kann. Daraus ergeben sich viele algorithmische Fragestellungen. In dieser Arbeit wird untersucht, unter welchen Voraussetzungen die Roboter sich auf einem Punkt versammeln bzw. eine Linie zwischen zwei festen Stationen bilden k{\"o}nnen. Daf{\"u}r werden mehrere Roboter-Strategien in verschiedenen Bewegungsmodellen vorgestellt. Diese Strategien werden auf ihre Effizienz hin untersucht. Es werden obere und untere Schranken f{\"u}r die ben{\"o}tigte Anzahl Runden und die Bewegungsdistanz gezeigt. In einigen F{\"a}llen wird außerdem die ben{\"o}tigte Bewegungsdistanz mit derjenigen Bewegungsdistanz verglichen, die eine optimale globale Strategie auf der gleichen Instanz ben{\"o}tigen w{\"u}rde. So werden kompetitive Faktoren hergeleitet.},
author = {Kempkes, Barbara},
publisher = {Universität Paderborn},
school = {Universität Paderborn},
title = {{Local strategies for robot formation problems}},
year = {2012},
}
@inproceedings{619,
abstract = {Dynamics in networks is caused by a variety of reasons, like nodes moving in 2D (or 3D) in multihop cellphone networks, joins and leaves in peer-to-peer networks, evolution in social networks, and many others. In order to understand such kinds of dynamics, and to design distributed algorithms that behave well under dynamics, many ways to model dynamics are introduced and analyzed w.r.t. correctness and efficiency of distributed algorithms. In [16], Kuhn, Lynch, and Oshman have introduced a very general, worst case type model of dynamics: The edge set of the network may change arbitrarily from step to step, the only restriction is that it is connected at all times and the set of nodes does not change. An extended model demands that a fixed connected subnetwork is maintained over each time interval of length T (T-interval dynamics). They have presented, among others, algorithms for counting the number of nodes under such general models of dynamics. In this paper, we generalize their models and algorithms by adding random edge faults, i.e., we consider fault-prone dynamic networks: We assume that an edge currently existing may fail to transmit data with some probability p. We first observe that strong counting, i.e., each node knows the correct count and stops, is not possible in a model with random edge faults. Our main two positive results are feasibility and runtime bounds for weak counting, i.e., stopping is no longer required (but still a correct count in each node), and for strong counting with an upper bound, i.e., an upper bound N on n is known to all nodes.},
author = {Brandes, Philipp and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 4th Workshop on Theoretical Aspects of Dynamic Distributed Systems (TADDS)},
pages = {9--14},
publisher = {ACM},
title = {{Distributed Computing in Fault-Prone Dynamic Networks}},
doi = {10.1145/2414815.2414818},
year = {2012},
}
NOTE(review): @misc with publisher = {Universität Paderborn} is this file's
recurring pattern for university theses without a recorded degree type (cf.
entry 1186 above); presumably a student thesis — confirm the degree type
before switching to @mastersthesis or similar. (Text outside an entry is
ignored by BibTeX, so this note is a safe comment.)
@misc{638,
author = {Eidens, Fabian},
publisher = {Universität Paderborn},
title = {{Adaptive Verbindungsstrategien in dynamischen Suchnetzwerken}},
year = {2012},
}