@misc{316,
author = {Pautz, Jannis},
publisher = {Universität Paderborn},
title = {{Budget Games with priced strategies}},
year = {2015},
}
@misc{277,
author = {Kothe, Nils},
publisher = {Universität Paderborn},
title = {{Multilevel Netzwerk Spiele mit konstanten Entfernungen im Highspeed-Netzwerk}},
year = {2015},
}
@unpublished{16452,
abstract = {We consider the problem of dominating set-based virtual backbone used for
routing in asymmetric wireless ad-hoc networks. These networks have non-uniform
transmission ranges and are modeled using the well-established disk graphs. The
corresponding graph theoretic problem seeks a strongly connected
dominating-absorbent set of minimum cardinality in a digraph. A subset of nodes
in a digraph is a strongly connected dominating-absorbent set if the subgraph
induced by these nodes is strongly connected and each node in the graph is
either in the set or has both an in-neighbor and an out-neighbor in it.
Distributed algorithms for this problem are of practical significance due to
the dynamic nature of ad-hoc networks. We present a first distributed
approximation algorithm, with a constant approximation factor and O(Diam)
running time, where Diam is the diameter of the graph. Moreover we present a
simple heuristic algorithm and conduct an extensive simulation study showing
that our heuristic outperforms previously known approaches for the problem.},
author = {Abu-Khzam, Faisal N. and Markarian, Christine and Meyer auf der Heide, Friedhelm and Schubert, Michael},
note = {arXiv:1510.01866},
eprint = {1510.01866},
archiveprefix = {arXiv},
title = {{Approximation and Heuristic Algorithms for Computing Backbones in Asymmetric Ad-Hoc Networks}},
year = {2015},
}
@phdthesis{317,
author = {Jähn, Claudius},
school = {Universität Paderborn},
title = {{Bewertung von Renderingalgorithmen für komplexe 3-D-Szenen}},
year = {2015},
}
@inproceedings{266,
abstract = {Many markets have seen a shift from the idea of buying and moved to leasing instead. Arguably, the latter has been the major catalyst for their success. Ten years ago, research realized this shift and initiated the study of "online leasing problems" by introducing leasing to online optimization problems. Resources required to provide a service in an "online leasing problem" are no more bought but leased for different durations. In this paper, we provide an overview of results that contribute to the understanding of "online resource leasing problems". },
author = {Markarian, Christine and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 2015 ACM Symposium on Principles of Distributed Computing (PODC)},
pages = {343--344},
title = {{Online Resource Leasing}},
doi = {10.1145/2767386.2767454},
year = {2015},
}
@book{17431,
editor = {Gausemeier, Jürgen and Grafe, Michael and Meyer auf der Heide, Friedhelm},
publisher = {Verlagsschriftenreihe des Heinz Nixdorf Instituts},
title = {{Augmented & Virtual Reality in der Produktentstehung: Grundlagen, Methoden und Werkzeuge; Interaktions- und Visualisierungstechniken, Virtual Prototyping intelligenter technischer Systeme mit AR/VR}},
volume = {342},
year = {2015},
}
@inproceedings{274,
abstract = {Consider the problem in which n jobs that are classified into k types are to be scheduled on m identical machines without preemption. A machine requires a proper setup taking s time units before processing jobs of a given type. The objective is to minimize the makespan of the resulting schedule. We design and analyze an approximation algorithm that runs in time polynomial in n,m and k and computes a solution with an approximation factor that can be made arbitrarily close to 3/2.},
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, Sören},
booktitle = {Algorithms and Data Structures: 14th International Symposium, WADS 2015, Victoria, BC, Canada, August 5-7, 2015. Proceedings},
editor = {Dehne, Frank and Sack, Jörg Rüdiger and Stege, Ulrike},
pages = {542--553},
title = {{Non-preemptive Scheduling on Machines with Setup Times}},
doi = {10.1007/978-3-319-21840-3_45},
year = {2015},
}
@phdthesis{267,
author = {Markarian, Christine},
school = {Universität Paderborn},
title = {{Online Resource Leasing}},
year = {2015},
}
@article{320,
abstract = {We consider structural and algorithmic questions related to the Nash dynamics of weighted congestion games. In weighted congestion games with linear latency functions, the existence of pure Nash equilibria is guaranteed by a potential function argument. Unfortunately, this proof of existence is inefficient and computing pure Nash equilibria in such games is a PLS-hard problem even when all players have unit weights. The situation gets worse when superlinear (e.g., quadratic) latency functions come into play; in this case, the Nash dynamics of the game may contain cycles and pure Nash equilibria may not even exist. Given these obstacles, we consider approximate pure Nash equilibria as alternative solution concepts. A ρ--approximate pure Nash equilibrium is a state of a (weighted congestion) game from which no player has any incentive to deviate in order to improve her cost by a multiplicative factor higher than ρ. Do such equilibria exist for small values of ρ? And if so, can we compute them efficiently?We provide positive answers to both questions for weighted congestion games with polynomial latency functions by exploiting an “approximation” of such games by a new class of potential games that we call Ψ-games. This allows us to show that these games have d!-approximate pure Nash equilibria, where d is the maximum degree of the latency functions. Our main technical contribution is an efficient algorithm for computing O(1)-approximate pure Nash equilibria when d is a constant. For games with linear latency functions, the approximation guarantee is 3+√5/2 + Oγ for arbitrarily small γ > 0; for latency functions with maximum degree d≥ 2, it is d2d+o(d). The running time is polynomial in the number of bits in the representation of the game and 1/γ. 
As a byproduct of our techniques, we also show the following interesting structural statement for weighted congestion games with polynomial latency functions of maximum degree d ≥ 2: polynomially-long sequences of best-response moves from any initial state to a dO(d2)-approximate pure Nash equilibrium exist and can be efficiently identified in such games as long as d is a constant.To the best of our knowledge, these are the first positive algorithmic results for approximate pure Nash equilibria in weighted congestion games. Our techniques significantly extend our recent work on unweighted congestion games through the use of Ψ-games. The concept of approximating nonpotential games by potential ones is interesting in itself and might have further applications.},
author = {Caragiannis, Ioannis and Fanelli, Angelo and Gravin, Nick and Skopalik, Alexander},
journal = {ACM Transactions on Economics and Computation},
number = {1},
publisher = {ACM},
title = {{Approximate Pure Nash Equilibria in Weighted Congestion Games: Existence, Efficient Computation, and Structure}},
doi = {10.1145/2614687},
volume = {3},
year = {2015},
}
@inproceedings{17425,
author = {Berssenbrügge, Jan and Wiederkehr, Olga and Jähn, Claudius and Fischer, Matthias},
booktitle = {12. Paderborner Workshop Augmented & Virtual Reality in der Produktentstehung},
pages = {65--78},
publisher = {Verlagsschriftenreihe des Heinz Nixdorf Instituts},
title = {{Anbindung des Virtuellen Prototypen an die Partialmodelle intelligenter technischer Systeme}},
volume = {343},
year = {2015},
}
@inproceedings{16460,
abstract = {Consider n nodes connected to a single coordinator. Each node receives an
individual online data stream of numbers and, at any point in time, the
coordinator has to know the k nodes currently observing the largest values, for
a given k between 1 and n. We design and analyze an algorithm that solves this
problem while bounding the amount of messages exchanged between the nodes and
the coordinator. Our algorithm employs the idea of using filters which,
intuitively speaking, leads to few messages to be sent, if the new input is
"similar" to the previous ones. The algorithm uses a number of messages that is
on expectation by a factor of O((log {\Delta} + k) log n) larger than that of
an offline algorithm that sets filters in an optimal way, where {\Delta} is
upper bounded by the largest value observed by any node.},
author = {Mäcker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 29th International Parallel and Distributed Processing Symposium (IPDPS)},
pages = {357--364},
publisher = {IEEE},
title = {{Online Top-k-Position Monitoring of Distributed Data Streams}},
doi = {10.1109/IPDPS.2015.40},
year = {2015},
}
@inproceedings{395,
abstract = {We consider a multilevel network game, where nodes can improvetheir communication costs by connecting to a high-speed network.The n nodes are connected by a static network and each node can decideindividually to become a gateway to the high-speed network. The goalof a node v is to minimize its private costs, i.e., the sum (SUM-game) ormaximum (MAX-game) of communication distances from v to all othernodes plus a fixed price α > 0 if it decides to be a gateway. Between gatewaysthe communication distance is 0, and gateways also improve othernodes’ distances by behaving as shortcuts. For the SUM-game, we showthat for α ≤ n − 1, the price of anarchy is Θ (n/√α) and in this rangeequilibria always exist. In range α ∈ (n−1, n(n−1)) the price of anarchyis Θ(√α), and for α ≥ n(n − 1) it is constant. For the MAX-game, weshow that the price of anarchy is either Θ (1 + n/√α), for α ≥ 1, orelse 1. Given a graph with girth of at least 4α, equilibria always exist.Concerning the dynamics, both games are not potential games. For theSUM-game, we even show that it is not weakly acyclic.},
author = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Jung, Daniel and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {435--440},
title = {{Multilevel Network Games}},
doi = {10.1007/978-3-319-13129-0_36},
year = {2014},
}
@inproceedings{453,
abstract = {In this paper we study the potential function in congestion games. We consider both games with non-decreasing cost functions as well as games with non-increasing utility functions. We show that the value of the potential function $\Phi(\sf s)$ of any outcome $\sf s$ of a congestion game approximates the optimum potential value $\Phi(\sf s^*)$ by a factor $\Psi_{\mathcal{F}}$ which only depends on the set of cost/utility functions $\mathcal{F}$, and an additive term which is bounded by the sum of the total possible improvements of the players in the outcome $\sf s$. The significance of this result is twofold. On the one hand it provides \emph{Price-of-Anarchy}-like results with respect to the potential function. On the other hand, we show that these approximations can be used to compute $(1+\varepsilon)\cdot\Psi_{\mathcal{F}}$-approximate pure Nash equilibria for congestion games with non-decreasing cost functions. For the special case of polynomial cost functions, this significantly improves the guarantees from Caragiannis et al. [FOCS 2011]. Moreover, our machinery provides the first guarantees for general latency functions.},
author = {Feldotto, Matthias and Gairing, Martin and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {30--43},
title = {{Bounding the Potential Function in Congestion Games and Approximate Pure Nash Equilibria}},
doi = {10.1007/978-3-319-13129-0_3},
year = {2014},
}
@inproceedings{435,
abstract = {We give a polynomial time algorithm to compute an optimal energy and fractional weighted flow trade-off schedule for a speed-scalable processor with discrete speeds.Our algorithm uses a geometric approach that is based on structural properties obtained from a primal-dual formulation of the problem.},
author = {Antoniadis, Antonios and Barcelo, Neal and Consuegra, Mario and Kling, Peter and Nugent, Michael and Pruhs, Kirk and Scquizzato, Michele},
booktitle = {Proceedings of the 31st Symposium on Theoretical Aspects of Computer Science (STACS)},
pages = {63--74},
title = {{Efficient Computation of Optimal Energy and Fractional Weighted Flow Trade-off Schedules}},
doi = {10.4230/LIPIcs.STACS.2014.63},
year = {2014},
}
@inproceedings{459,
abstract = {In this survey article, we discuss two algorithmic research areas that emerge from problems that arise when resources are offered in the cloud. The first area, online leasing, captures problems arising from the fact that resources in the cloud are not bought, but leased by cloud vendors. The second area, Distributed Storage Systems, deals with problems arising from so-called cloud federations, i.e., when several cloud providers are needed to fulfill a given task.},
author = {Kniesburges, Sebastian and Markarian, Christine and Meyer auf der Heide, Friedhelm and Scheideler, Christian},
booktitle = {Proceedings of the 21st International Colloquium on Structural Information and Communication Complexity (SIROCCO)},
pages = {1--13},
title = {{Algorithmic Aspects of Resource Management in the Cloud}},
doi = {10.1007/978-3-319-09620-9_1},
year = {2014},
}
@phdthesis{431,
abstract = {In meiner Dissertation besch{\"a}ftige ich mich mit dem Entwurf und der Analyse energieeffizienter Schedulingalgorithmen, insbesondere f{\"u}r sogenannte Speed-Scaling Modelle. Diese stellen das theoretische Pendant von Techniken wie AMDs PowerNOW! und Intels SpeedStep dar, welche es erlauben die Geschwindigkeit von Prozessoren zur Laufzeit an die derzeitigen Bedingungen anzupassen. Theoretische Untersuchungen solcher Modelle sind auf eine Arbeit von Yao, Demers und Shenker (FOCS'95) zur{\"u}ckzuf{\"u}hren. Hier kombinieren die Autoren klassisches Deadline-Scheduling mit einem Prozessor der Speed-Scaling beherrscht. Es gilt Jobs verschiedener Gr{\"o}ße fristgerecht abzuarbeiten und die dabei verwendete Energie zu minimieren. Der Energieverbrauch des Prozessors wird durch eine konvexe Funktion $\POW\colon\R_{\geq0}\to\R_{\geq0}$ modelliert, welche die Geschwindigkeit auf den Energieverbrauch abbildet.Meine Dissertation betrachtet verschiedene Varianten des urspr{\"u}nglichen Speed-Scaling Modells. Forschungsrelevante Ergebnisse sind in den Kapiteln 3 bis 6 zu finden und erstrecken sich {\"u}ber die im Folgenden beschriebenen Aspekte:- Kapitel 3 und 4 betrachten verschiedene \emph{Price-Collecting} Varianten des Originalproblems. Hier d{\"u}rfen einzelne Deadlines verfehlt werden, sofern eine jobabh{\"a}ngige Strafe gezahlt wird. Ich entwerfe insbesondere Online-Algorithmen mit einer beweisbar guten Competitiveness. Dabei liefern meine Ergebnisse substantielle Verbesserungen bestehender Arbeiten und erweitern diese unter Anderem auf Szenarien mit mehreren Prozessoren.- In Kapitel 5 wird statt des klassischen Deadline-Schedulings eine Linearkombination der durchschnittlichen Antwortzeit und des Energieverbrauchs betrachtet. Die Frage, ob dieses Problem NP-schwer ist, stellt eine der zentralen Forschungsfragen in diesem Gebiet dar. 
F{\"u}r eine relaxierte Form dieser Frage entwerfe ich einen effizienter Algorithmus und beweise seine Optimalit{\"a}t.- Das letzte Kapitel betrachtet ein Modell, welches – auf den ersten Blick – nicht direkt zur Speed-Scaling Literatur z{\"a}hlt. Hier geht es stattdessen um ein allgemeines Resource-Constrained Scheduling, in dem sich die Prozessoren zusammen eine gemeinsame, beliebig aufteilbare Ressource teilen. Ich untersuche die Komplexit{\"a}t des Problems und entwerfe verschiedene Approximationsalgorithmen.},
author = {Kling, Peter},
school = {Universität Paderborn},
title = {{Energy-efficient Scheduling Algorithms}},
year = {2014},
}
@inproceedings{412,
abstract = {In this paper we present and analyze HSkip+, a self-stabilizing overlay network for nodes with arbitrary heterogeneous bandwidths. HSkip+ has the same topology as the Skip+ graph proposed by Jacob et al. [PODC 2009] but its self-stabilization mechanism significantly outperforms the self-stabilization mechanism proposed for Skip+. Also, the nodes are now ordered according to their bandwidths and not according to their identifiers. Various other solutions have already been proposed for overlay networks with heterogeneous bandwidths, but they are not self-stabilizing. In addition to HSkip+ being self-stabilizing, its performance is on par with the best previous bounds on the time and work for joining or leaving a network of peers of logarithmic diameter and degree and arbitrary bandwidths. Also, the dilation and congestion for routing messages is on par with the best previous bounds for such networks, so that HSkip+ combines the advantages of both worlds. Our theoretical investigations are backed by simulations demonstrating that HSkip+ is indeed performing much better than Skip+ and working correctly under high churn rates.},
author = {Feldotto, Matthias and Scheideler, Christian and Graffi, Kalman},
booktitle = {Proceedings of the 14th IEEE International Conference on Peer-to-Peer Computing (P2P)},
pages = {1--10},
title = {{HSkip+: A Self-Stabilizing Overlay Network for Nodes with Heterogeneous Bandwidths}},
doi = {10.1109/P2P.2014.6934300},
year = {2014},
}
@misc{373,
author = {Pahl, David},
publisher = {Universität Paderborn},
title = {{Reputationssysteme für zusammengesetzte Dienstleistungen}},
year = {2014},
}
@inproceedings{380,
abstract = {Network creation games model the creation and usage costs of networks formed by n selfish nodes. Each node v can buy a set of edges, each for a fixed price α > 0. Its goal is to minimize its private costs, i.e., the sum (SUM-game, Fabrikant et al., PODC 2003) or maximum (MAX-game, Demaine et al., PODC 2007) of distances from v to all other nodes plus the prices of the bought edges. The above papers show the existence of Nash equilibria as well as upper and lower bounds for the prices of anarchy and stability. In several subsequent papers, these bounds were improved for a wide range of prices α. In this paper, we extend these models by incorporating quality-of-service aspects: Each edge cannot only be bought at a fixed quality (edge length one) for a fixed price α. Instead, we assume that quality levels (i.e., edge lengths) are varying in a fixed interval [βˇ,β^], 0 < βˇ ≤ β^.},
series = {LNCS},
author = {Cord-Landwehr, Andreas and Mäcker, Alexander and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {423--428},
title = {{Quality of Service in Network Creation Games}},
doi = {10.1007/978-3-319-13129-0_34},
year = {2014},
}
@inproceedings{455,
abstract = {We study the existence of approximate pure Nash equilibria in weighted congestion games and develop techniques to obtain approximate potential functions that prove the existence of alpha-approximate pure Nash equilibria and the convergence of alpha-improvement steps. Specifically, we show how to obtain upper bounds for approximation factor alpha for a given class of cost functions. For example for concave cost functions the factor is at most 3/2, for quadratic cost functions it is at most 4/3, and for polynomial cost functions of maximal degree d it is at at most d + 1. For games with two players we obtain tight bounds which are as small as for example 1.054 in the case of quadratic cost functions.},
author = {Hansknecht, Christoph and Klimm, Max and Skopalik, Alexander},
booktitle = {Proceedings of the 17th International Workshop on Approximation Algorithms for Combinatorial Optimization Problems (APPROX)},
pages = {242--257},
title = {{Approximate pure Nash equilibria in weighted congestion games}},
doi = {10.4230/LIPIcs.APPROX-RANDOM.2014.242},
year = {2014},
}
@inproceedings{462,
abstract = {We discuss a technique to analyze complex infinitely repeated games using techniques from the fields of game theory and simulations. Our research is motivated by the analysis of electronic markets with thousands of participants and possibly complex strategic behavior. We consider an example of a global market of composed IT services to demonstrate the use of our simulation technique. We present our current work in this area and we want to discuss further approaches for the future.},
author = {Feldotto, Matthias and Skopalik, Alexander},
booktitle = {Proceedings of the 4th International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH 2014)},
pages = {625--630},
title = {{A Simulation Framework for Analyzing Complex Infinitely Repeated Games}},
doi = {10.5220/0005110406250630},
year = {2014},
}
@inbook{16394,
author = {Lukovszki, Tamás and Meyer auf der Heide, Friedhelm},
booktitle = {Lecture Notes in Computer Science},
isbn = {9783319144719},
issn = {0302-9743},
title = {{Fast Collisionless Pattern Formation by Anonymous, Position-Aware Robots}},
doi = {10.1007/978-3-319-14472-6_17},
year = {2014},
}
@book{16870,
editor = {Flocchini, Paola and Gao, Jie and Kranakis, Evangelos and Meyer auf der Heide, Friedhelm},
isbn = {9783642453458},
issn = {0302-9743},
publisher = {Springer},
title = {{Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics, {ALGOSENSORS} 2013}},
doi = {10.1007/978-3-642-45346-5},
volume = {8243},
year = {2014},
}
@inproceedings{379,
abstract = {In the leasing variant of Set Cover presented by Anthony et al.[1], elements U arrive over time and must be covered by sets from a familyF of subsets of U. Each set can be leased for K different periods of time.Let |U| = n and |F| = m. Leasing a set S for a period k incurs a cost ckS and allows S to cover its elements for the next lk time steps. The objectiveis to minimize the total cost of the sets leased, such that elements arrivingat any time t are covered by sets which contain them and are leased duringtime t. Anthony et al. [1] gave an optimal O(log n)-approximation forthe problem in the offline setting, unless P = NP [22]. In this paper, wegive randomized algorithms for variants of Set Cover Leasing in the onlinesetting, including a generalization of Online Set Cover with Repetitionspresented by Alon et al. [2], where elements appear multiple times andmust be covered by a different set at each arrival. Our results improve theO(log2(mn)) competitive factor of Online Set Cover with Repetitions [2]to O(log d log(dn)) = O(logmlog(mn)), where d is the maximum numberof sets an element belongs to.},
author = {Abshoff, Sebastian and Markarian, Christine and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 8th Annual International Conference on Combinatorial Optimization and Applications (COCOA)},
pages = {25--34},
title = {{Randomized Online Algorithms for Set Cover Leasing Problems}},
doi = {10.1007/978-3-319-12691-3_3},
year = {2014},
}
@inproceedings{451,
abstract = {We introduce the concept of budget games. Players choose a set of tasks and each task has a certain demand on every resource in the game. Each resource has a budget. If the budget is not enough to satisfy the sum of all demands, it has to be shared between the tasks. We study strategic budget games, where the budget is shared proportionally. We also consider a variant in which the order of the strategic decisions influences the distribution of the budgets. The complexity of the optimal solution as well as existence, complexity and quality of equilibria are analysed. Finally, we show that the time an ordered budget game needs to convergence towards an equilibrium may be exponential.},
author = {Drees, Maximilian and Riechers, Sören and Skopalik, Alexander},
booktitle = {Proceedings of the 7th International Symposium on Algorithmic Game Theory (SAGT)},
editor = {Lavi, Ron},
pages = {110--121},
title = {{Budget-restricted utility games with ordered strategic decisions}},
doi = {10.1007/978-3-662-44803-8_10},
year = {2014},
}
@inproceedings{456,
abstract = {We study the existence of approximate pure Nash equilibriain social context congestion games. For any given set of allowed costfunctions F, we provide a threshold value μ(F), and show that for theclass of social context congestion games with cost functions from F, α-Nash dynamics are guaranteed to converge to α-approximate pure Nashequilibrium if and only if α > μ(F).Interestingly, μ(F) is related and always upper bounded by Roughgarden’sanarchy value [19].},
author = {Gairing, Martin and Kotsialou, Grammateia and Skopalik, Alexander},
booktitle = {Proceedings of the 10th International Conference on Web and Internet Economics (WINE)},
pages = {480--485},
title = {{Approximate pure Nash equilibria in Social Context Congestion Games}},
doi = {10.1007/978-3-319-13129-0_43},
year = {2014},
}
@inbook{16395,
author = {Abshoff, Sebastian and Meyer auf der Heide, Friedhelm},
booktitle = {Structural Information and Communication Complexity},
isbn = {9783319096193},
issn = {0302-9743},
title = {{Continuous Aggregation in Dynamic Ad-Hoc Networks}},
doi = {10.1007/978-3-319-09620-9_16},
year = {2014},
}
@inproceedings{368,
abstract = {We consider the problem of scheduling a number of jobs on $m$ identical processors sharing a continuously divisible resource. Each job j comes with a resource requirement r_j \in {0,1}. The job can be processed at full speed if granted its full resource requirement. If receiving only an x-portion of r_j, it is processed at an x-fraction of the full speed. Our goal is to find a resource assignment that minimizes the makespan (i.e., the latest completion time). Variants of such problems, relating the resource assignment of jobs to their \emph{processing speeds}, have been studied under the term discrete-continuous scheduling. Known results are either very pessimistic or heuristic in nature.In this paper, we suggest and analyze a slightly simplified model. It focuses on the assignment of shared continuous resources to the processors. The job assignment to processors and the ordering of the jobs have already been fixed. It is shown that, even for unit size jobs, finding an optimal solution is NP-hard if the number of processors is part of the input. Positive results for unit size jobs include an efficient optimal algorithm for 2 processors. Moreover, we prove that balanced schedules yield a 2-1/m-approximation for a fixed number of processors. Such schedules are computed by our GreedyBalance algorithm, for which the bound is tight.},
author = {Brinkmann, Andre and Kling, Peter and Meyer auf der Heide, Friedhelm and Nagel, Lars and Riechers, Sören and Süss, Tim},
booktitle = {Proceedings of the 26th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {128--137},
title = {{Scheduling Shared Continuous Resources on Many-Cores}},
doi = {10.1145/2612669.2612698},
year = {2014},
}
@inproceedings{370,
abstract = {Max-min fairness (MMF) is a widely known approach to a fair allocation of bandwidth to each of the users in a network. This allocation can be computed by uniformly raising the bandwidths of all users without violating capacity constraints. We consider an extension of these allocations by raising the bandwidth with arbitrary and not necessarily uniform time-depending velocities (allocation rates). These allocations are used in a game-theoretic context for routing choices, which we formalize in progressive filling games (PFGs).We present a variety of results for equilibria in PFGs. We show that these games possess pure Nash and strong equilibria. While computation in general is NP-hard, there are polynomial-time algorithms for prominent classes of Max-Min-Fair Games (MMFG), including the case when all users have the same source-destination pair. We characterize prices of anarchy and stability for pure Nash and strong equilibria in PFGs and MMFGs when players have different or the same source-destination pairs. In addition, we show that when a designer can adjust allocation rates, it is possible to design games with optimal strong equilibria. Some initial results on polynomial-time algorithms in this direction are also derived. },
author = {Harks, Tobias and Höfer, Martin and Schewior, Kevin and Skopalik, Alexander},
booktitle = {Proceedings of the 33rd Annual IEEE International Conference on Computer Communications (INFOCOM'14)},
pages = {352--360},
title = {{Routing Games with Progressive Filling}},
doi = {10.1109/TNET.2015.2468571},
year = {2014},
}
@inproceedings{452,
abstract = {Today's networks, like the Internet, do not consist of one but a mixture of several interconnected networks. Each has individual qualities and hence the performance of a network node results from the networks' interplay.We introduce a new game theoretic model capturing the interplay between a high-speed backbone network and a low-speed general purpose network. In our model, n nodes are connected by a static network and each node can decide individually to become a gateway node. A gateway node pays a fixed price for its connection to the high-speed network, but can utilize the high-speed network to gain communication distance 0 to all other gateways. Communication distances in the low-speed network are given by the hop distances. The effective communication distance between any two nodes then is given by the shortest path, which is possibly improved by using gateways as shortcuts.Every node v has the objective to minimize its communication costs, given by the sum (SUM-game) or maximum (MAX-game) of the effective communication distances from v to all other nodes plus a fixed price \alpha > 0, if it decides to be a gateway. For both games and different ranges of \alpha, we study the existence of equilibria, the price of anarchy, and convergence properties of best-response dynamics.},
author = {Abshoff, Sebastian and Cord-Landwehr, Andreas and Jung, Daniel and Skopalik, Alexander},
booktitle = {Proceedings of the 7th International Symposium on Algorithmic Game Theory (SAGT)},
editor = {Lavi, Ron},
pages = {294},
title = {{Brief Announcement: A Model for Multilevel Network Games}},
year = {2014},
}
@phdthesis{19039,
author = {Petring, Ralf},
title = {{Multi-Algorithmen-Rendering: Darstellung heterogener 3-D-Szenen in Echtzeit}},
year = {2014},
}
@inproceedings{477,
abstract = {We consider the k-token dissemination problem, where k initially arbitrarily distributed tokens have to be disseminated to all nodes in a dynamic network (as introduced by Kuhn et al., STOC 2010). In contrast to general dynamic networks, our dynamic networks are unit disk graphs, i.e., nodes are embedded into the Euclidean plane and two nodes are connected if and only if their distance is at most R. Our worst-case adversary is allowed to move the nodes on the plane, but the maximum velocity v_max of each node is limited and the graph must be connected in each round. For this model, we provide almost tight lower and upper bounds for k-token dissemination if nodes are restricted to send only one token per round. It turns out that the maximum velocity v_max is a meaningful parameter to characterize dynamics in our model.},
author = {Abshoff, Sebastian and Benter, Markus and Cord-Landwehr, Andreas and Malatyali, Manuel and Meyer auf der Heide, Friedhelm},
booktitle = {Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics, {ALGOSENSORS} 2013, Sophia Antipolis, France, September 5-6, 2013, Revised Selected Papers},
pages = {22--34},
title = {{Token Dissemination in Geometric Dynamic Networks}},
doi = {10.1007/978-3-642-45346-5_3},
year = {2013},
}
@phdthesis{17440,
author = {Eikel, Benjamin},
title = {{Spherical visibility sampling: preprocessed visibility for occlusion culling in complex 3D scenes}},
year = {2013},
}
@inproceedings{505,
abstract = {In this paper we introduce “On-The-Fly Computing”, our vision of future IT services that will be provided by assembling modular software components available on world-wide markets. After suitable components have been found, they are automatically integrated, configured and brought to execution in an On-The-Fly Compute Center. We envision that these future compute centers will continue to leverage three current trends in large scale computing which are an increasing amount of parallel processing, a trend to use heterogeneous computing resources, and—in the light of rising energy cost—energy-efficiency as a primary goal in the design and operation of computing systems. In this paper, we point out three research challenges and our current work in these areas.},
author = {Happe, Markus and Kling, Peter and Plessl, Christian and Platzner, Marco and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 9th IEEE Workshop on Software Technology for Future embedded and Ubiquitous Systems (SEUS)},
publisher = {IEEE},
title = {{On-The-Fly Computing: A Novel Paradigm for Individualized IT Services}},
doi = {10.1109/ISORC.2013.6913232},
year = {2013},
}
@unpublished{524,
abstract = {We study the complexity theory for the local distributed setting introduced by Korman, Peleg and Fraigniaud. They have defined three complexity classes LD (Local Decision), NLD (Nondeterministic Local Decision) and NLD^#n. The class LD consists of all languages which can be decided with a constant number of communication rounds. The class NLD consists of all languages which can be verified by a nondeterministic algorithm with a constant number of communication rounds. In order to define the nondeterministic classes, they have transferred the notation of nondeterminism into the distributed setting by the use of certificates and verifiers. The class NLD^#n consists of all languages which can be verified by a nondeterministic algorithm where each node has access to an oracle for the number of nodes. They have shown the hierarchy LD subset NLD subset NLD^#n. Our main contributions are strict hierarchies within the classes defined by Korman, Peleg and Fraigniaud. We define additional complexity classes: the class LD(t) consists of all languages which can be decided with at most t communication rounds. The class NLD-O(f) consists of all languages which can be verified by a local verifier such that the size of the certificates that are needed to verify the language are bounded by a function from O(f). Our main results are refined strict hierarchies within these nondeterministic classes.},
author = {Meyer auf der Heide, Friedhelm and Swirkot, Kamil},
publisher = {arXiv},
note = {arXiv preprint},
title = {{Hierarchies in Local Distributed Decision}},
year = {2013},
}
@inproceedings{562,
abstract = {In Distributed Cloud Computing, applications are deployed across many data centres at topologically diverse locations to improved network-related quality of service (QoS). As we focus on interactive applications, we minimize the latency between users and an application by allocating Cloud resources nearby the customers. Allocating resources at all locations will result in the best latency but also in the highest expenses. So we need to find an optimal subset of locations which reduces the latency but also the expenses – the facility location problem (FLP). In addition, we consider resource capacity restrictions, as a resource can only serve a limited amount of users. An FLP can be globally solved. Additionally, we propose a local, distributed heuristic. This heuristic is running within the network and does not depend on a global component. No distributed, local approximations for the capacitated FLP have been proposed so far due to the complexity of the problem. We compared the heuristic with an optimal solution obtained from a mixed integer program for different network topologies. We investigated the influence of different parameters like overall resource utilization or different latency weights.},
author = {Keller, Matthias and Pawlik, Stefan and Pietrzyk, Peter and Karl, Holger},
booktitle = {Proceedings of the 6th International Conference on Utility and Cloud Computing (UCC) workshop on Distributed cloud computing},
pages = {429--434},
title = {{A Local Heuristic for Latency-Optimized Distributed Cloud Deployment}},
doi = {10.1109/UCC.2013.85},
year = {2013},
}
@inproceedings{17439,
abstract = {Viele virtuelle 3-D-Szenen im industriellen Bereich sind nicht gleichmäßig strukturiert, z.B. weil sie eine stark unterschiedliche Dichteverteilung der Polygone aufweisen. Für solch heterogene Daten existiert kein Algorithmus, der die Gesamtheit der Daten sowohl schnell als auch mit guter Qualität darstellen kann. Die Auswahl der richtigen Algorithmen für einzelne Szenenteile durch einen Experten ist zeitintensiv und in vielen Visualisierungssystemen nicht umzusetzen. Um dieses Problem zu lösen, setzt das hier vorgestellte Multi-Algorithmen-Rendering verschiedene Renderingalgorithmen gleichzeitig ein, um eine virtuelle 3-D-Szene darzustellen. Das Verfahren unterteilt die Szene dafür in einem Vorverarbeitungsschritt automatisch in geeignete Teilregionen und bestimmt deren Eigenschaften. Diese Daten werden zur Laufzeit dazu genutzt, um ständig für den aktuellen Standpunkt des Betrachters eine Abschätzung der Qualität und Laufzeit der zur Auswahl stehenden Renderingalgorithmen zu berechnen. Durch die Lösung eines Optimierungsproblems kann so bei vorgegebener Bildrate durch die passende Zuordnung der Algorithmen zu den Regionen die Bildqualität optimiert werden – bei automatischer Anpassung an die Leistungsfähigkeit der eingesetzten Hardware. In einer experimentellen Evaluierung vergleichen wir die Laufzeit und Bildqualität des Verfahrens mit denen verbreiteter Standardrenderingverfahren.},
author = {Petring, Ralf and Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm},
booktitle = {11. Paderborner Workshop Augmented \& Virtual Reality in der Produktentstehung},
pages = {49--60},
title = {{Darstellung heterogener 3-D-Szenen in Echtzeit}},
volume = {311},
year = {2013},
}
@inproceedings{563,
abstract = {Dominating set based virtual backbones are used for rou-ting in wireless ad-hoc networks. Such backbones receive and transmit messages from/to every node in the network. Existing distributed algorithms only consider undirected graphs, which model symmetric networks with uniform transmission ranges. We are particularly interested in the well-established disk graphs, which model asymmetric networks with non-uniform transmission ranges. The corresponding graph theoretic problem seeks a strongly connected dominating-absorbent set of minimum cardinality in a digraph. A subset of nodes in a digraph is a strongly connected dominating-absorbent set if the subgraph induced by these nodes is strongly connected and each node in the graph is either in the set or has both an in-neighbor and an out-neighbor in it. We introduce the first distributed algorithm for this problem in disk graphs. The algorithm gives an O(k^4) -approximation ratio and has a runtime bound of O(Diam) where Diam is the diameter of the graph and k denotes the transmission ratio r_{max}/r_{min} with r_{max} and r_{min} being the maximum and minimum transmission range, respectively. Moreover, we apply our algorithm on the subgraph of disk graphs consisting of only bidirectional edges. Our algorithm gives an O(ln k) -approximation and a runtime bound of O(k^8 log^∗ n) , which, for bounded k , is an optimal approximation for the problem, following Lenzen and Wattenhofer’s Ω(log^∗ n) runtime lower bound for distributed constant approximation in disk graphs.},
author = {Markarian, Christine and Meyer auf der Heide, Friedhelm and Schubert, Michael},
booktitle = {Proceedings of the 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics (ALGOSENSORS)},
pages = {217--227},
title = {{A Distributed Approximation Algorithm for Strongly Connected Dominating-Absorbent Sets in Asymmetric Wireless Ad-Hoc Networks}},
doi = {10.1007/978-3-642-45346-5_16},
year = {2013},
}
@inproceedings{17442,
author = {Meyer auf der Heide, Friedhelm},
booktitle = {11. Paderborner Workshop Augmented \& Virtual Reality in der Produktentstehung},
pages = {7--16},
publisher = {Heinz Nixdorf Institut},
title = {{Algorithmische Grundlagen für die Selbstorganisation von Roboterschwärmen}},
volume = {311},
year = {2013},
}
@article{16451,
author = {Brandes, Philipp and Degener, Bastian and Kempkes, Barbara and Meyer auf der Heide, Friedhelm},
issn = {0304-3975},
journal = {Theoretical Computer Science},
pages = {97--112},
title = {{Energy-efficient strategies for building short chains of mobile robots locally}},
doi = {10.1016/j.tcs.2012.10.056},
year = {2013},
}
@inproceedings{16393,
abstract = {Many 3D scenes (e.g. generated from CAD data) are composed of a multitude of objects that are nested in each other. A showroom, for instance, may contain multiple cars and every car has a gearbox with many gearwheels located inside. Because the objects occlude each other, only few are visible from outside. We present a new technique, Spherical Visibility Sampling (SVS), for real-time 3D rendering of such -- possibly highly complex -- scenes. SVS exploits the occlusion and annotates hierarchically structured objects with directional visibility information in a preprocessing step. For different directions, the directional visibility encodes which objects of a scene's region are visible from the outside of the regions' enclosing bounding sphere. Since there is no need to store a separate view space subdivision as in most techniques based on preprocessed visibility, a small memory footprint is achieved. Using the directional visibility information for an interactive walkthrough, the potentially visible objects can be retrieved very efficiently without the need for further visibility tests. Our evaluation shows that using SVS allows to preprocess complex 3D scenes fast and to visualize them in real time (e.g. a Power Plant model and five animated Boeing 777 models with billions of triangles). Because SVS does not require hardware support for occlusion culling during rendering, it is even applicable for rendering large scenes on mobile devices.},
author = {Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm},
booktitle = {Computer Graphics Forum},
issn = {0167-7055},
number = {4},
pages = {49--58},
title = {{Spherical Visibility Sampling}},
doi = {10.1111/cgf.12150},
volume = {32},
year = {2013},
}
@inbook{16406,
abstract = {In order to evaluate the efficiency of algorithms for real-time 3D rendering, different properties like rendering time, occluded triangles, or image quality, need to be investigated. Since these properties depend on the position of the camera, usually some camera path is chosen, along which the measurements are performed. As those measurements cover only a small part of the scene, this approach hardly allows drawing conclusions regarding the algorithm's properties at arbitrary positions in the scene. The presented method allows the systematic and position-independent evaluation of rendering algorithms. It uses an adaptive sampling approach to approximate the distribution of a property (like rendering time) for all positions in the scene. This approximation can be visualized to produce an intuitive impression of the algorithm's behavior or be statistically analyzed for objectively rating and comparing algorithms. We demonstrate our method by evaluating performance aspects of a known occlusion culling algorithm.
},
author = {Jähn, Claudius and Eikel, Benjamin and Fischer, Matthias and Petring, Ralf and Meyer auf der Heide, Friedhelm},
booktitle = {Advances in Visual Computing},
isbn = {9783642419133},
issn = {0302-9743},
title = {{Evaluation of Rendering Algorithms Using Position-Dependent Scene Properties}},
doi = {10.1007/978-3-642-41914-0_12},
year = {2013},
}
@inproceedings{507,
abstract = {We study two-party communication in the context of directed dynamic networks that are controlled by an adaptive adversary. This adversary is able to change all edges as long as the networks stay strongly-connected in each round. In this work, we establish a relation between counting the total number of nodes in the network and the problem of exchanging tokens between two communication partners which communicate through a dynamic network. We show that the communication problem for a constant fraction of n tokens in a dynamic network with n nodes is at most as hard as counting the number of nodes in a dynamic network with at most 4n+3 nodes. For the proof, we construct a family of directed dynamic networks and apply a lower bound from two-party communication complexity.},
author = {Abshoff, Sebastian and Benter, Markus and Malatyali, Manuel and Meyer auf der Heide, Friedhelm},
booktitle = {Proceedings of the 17th International Conference on Principles of Distributed Systems (OPODIS)},
pages = {11--22},
title = {{On Two-Party Communication Through Dynamic Networks}},
doi = {10.1007/978-3-319-03850-6_2},
year = {2013},
}
@phdthesis{514,
abstract = {Diese Arbeit besch{\"a}ftigt sich mit dem Facility Location Problem. Dies ist ein Optimierungsproblem, bei dem festgelegt werden muss an welchen Positionen Ressourcen zur Verf{\"u}gung gestellt werden, so dass diese von Nutzern gut erreicht werden k{\"o}nnen. Es sollen dabei Kosten minimiert werden, die zum einen durch Bereitstellung von Ressourcen und zum anderen durch Verbindungskosten zwischen Nutzern und Ressourcen entstehen. Die Schwierigkeit des Problems liegt darin, dass man einerseits m{\"o}glichst wenige Ressourcen zur Verf{\"u}gung stellen m{\"o}chte, andererseits daf{\"u}r sorgen muss, dass sich Nutzer nicht all zu weit weg von Ressourcen befinden. Dies w{\"u}rde n{\"a}mlich hohe Verbindungskosten nach sich ziehen. Das Facility Location Problem wurde bereits sehr intensiv in vielen unterschiedlichen Varianten untersucht. In dieser Arbeit werden drei Varianten des Problems modelliert und neue Algorithmen f{\"u}r sie entwickelt und bez{\"u}glich ihres Approximationsfaktors und ihrer Laufzeit analysiert. Jede dieser drei untersuchten Varianten hat einen besonderen Schwerpunkt. Bei der ersten Varianten handelt es sich um ein Online Problem, da hier die Eingabe nicht von Anfang an bekannt ist, sondern Schritt f{\"u}r Schritt enth{\"u}llt wird. Die Schwierigkeit hierbei besteht darin unwiderrufliche Entscheidungen treffen zu m{\"u}ssen ohne dabei die Zukunft zu kennen und trotzdem eine zu jeder Zeit gute L{\"o}sung angeben zu k{\"o}nnen. Der Schwerpunkt der zweiten Variante liegt auf Lokalit{\"a}t, die z.B. in Sensornetzwerken von großer Bedeutung ist. Hier soll eine L{\"o}sung verteilt und nur mit Hilfe von lokalen Information berechnet werden. Schließlich besch{\"a}ftigt sich die dritte Variante mit einer verteilten Berechnung, bei welcher nur eine stark beschr{\"a}nkte Datenmenge verschickt werden darf und dabei trotzdem ein sehr guter Approximationsfaktor erreicht werden muss. Die bei der Analyse der Approximationsfaktoren bzw. 
der Kompetitivit{\"a}t verwendeten Techniken basieren zum großen Teil auf Absch{\"a}tzung der primalen L{\"o}sung mit Hilfe einer L{\"o}sung des zugeh{\"o}rigen dualen Problems. F{\"u}r die Modellierung von Lokalit{\"a}t wird das weitverbreitete LOCAL Modell verwendet. In diesem Modell werden f{\"u}r die Algorithmen subpolynomielle obere Laufzeitschranken gezeigt.},
author = {Pietrzyk, Peter},
publisher = {Universität Paderborn},
title = {{Local and Online Algorithms for Facility Location}},
year = {2013},
}
@inproceedings{499,
abstract = {We present a new online algorithm for profit-oriented scheduling on multiple speed-scalable processors.Moreover, we provide a tight analysis of the algorithm's competitiveness.Our results generalize and improve upon work by \citet{Chan:2010}, which considers a single speed-scalable processor.Using significantly different techniques, we can not only extend their model to multiprocessors but also prove an enhanced and tight competitive ratio for our algorithm.In our scheduling problem, jobs arrive over time and are preemptable.They have different workloads, values, and deadlines.The scheduler may decide not to finish a job but instead to suffer a loss equaling the job's value.However, to process a job's workload until its deadline the scheduler must invest a certain amount of energy.The cost of a schedule is the sum of lost values and invested energy.In order to finish a job the scheduler has to determine which processors to use and set their speeds accordingly.A processor's energy consumption is power $\Power{s}$ integrated over time, where $\Power{s}=s^{\alpha}$ is the power consumption when running at speed $s$.Since we consider the online variant of the problem, the scheduler has no knowledge about future jobs.This problem was introduced by~\citet{Chan:2010} for the case of a single processor.They presented an online algorithm which is $\alpha^{\alpha}+2e\alpha$-competitive.We provide an online algorithm for the case of multiple processors with an improved competitive ratio of $\alpha^{\alpha}$.},
author = {Kling, Peter and Pietrzyk, Peter},
booktitle = {Proceedings of the 25th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {251--260},
title = {{Profitable Scheduling on Multiple Speed-Scalable Processors}},
doi = {10.1145/2486159.2486183},
year = {2013},
}
@proceedings{17443,
editor = {Gausemeier, Jürgen and Grafe, Michael and Meyer auf der Heide, Friedhelm},
publisher = {Heinz Nixdorf Institut},
title = {{11. Paderborner Workshop Augmented \& Virtual Reality in der Produktentstehung}},
volume = {311},
year = {2013},
}
@inbook{16407,
abstract = {Many virtual 3D scenes, especially those that are large, are not structured evenly. For such heterogeneous data, there is no single algorithm that is able to render every scene type at each position fast and with the same high image quality. For a small set of scenes, this situation can be improved if different rendering algorithms are manually assigned to particular parts of the scene by an experienced user. We introduce the Multi-Algorithm-Rendering method. It automatically deploys different rendering algorithms simultaneously for a broad range of scene types. The method divides the scene into subregions and measures the behavior of different algorithms for each region in a preprocessing step. During runtime, this data is utilized to compute an estimate for the quality and running time of the available rendering algorithms from the observer's point of view. By solving an optimizing problem, the image quality can be optimized by an assignment of algorithms to regions while keeping the frame rate almost constant.
},
author = {Petring, Ralf and Eikel, Benjamin and Jähn, Claudius and Fischer, Matthias and Meyer auf der Heide, Friedhelm},
booktitle = {Advances in Visual Computing},
isbn = {9783642419133},
issn = {0302-9743},
title = {{Real-Time 3D Rendering of Heterogeneous Scenes}},
doi = {10.1007/978-3-642-41914-0_44},
year = {2013},
}
@proceedings{558,
editor = {Flocchini, Paola and Gao, Jie and Kranakis, Evangelos and Meyer auf der Heide, Friedhelm},
location = {Sophia Antipolis, France},
publisher = {Springer},
title = {{Algorithms for Sensor Systems - 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics}},
doi = {10.1007/978-3-642-45346-5},
volume = {8243},
year = {2013},
}
@inproceedings{580,
abstract = {We present and study a new model for energy-aware and profit-oriented scheduling on a single processor.The processor features dynamic speed scaling as well as suspension to a sleep mode.Jobs arrive over time, are preemptable, and have different sizes, values, and deadlines.On the arrival of a new job, the scheduler may either accept or reject the job.Accepted jobs need a certain energy investment to be finished in time, while rejected jobs cause costs equal to their values.Here, power consumption at speed $s$ is given by $P(s)=s^{\alpha}+\beta$ and the energy investment is power integrated over time.Additionally, the scheduler may decide to suspend the processor to a sleep mode in which no energy is consumed, though awaking entails fixed transition costs $\gamma$.The objective is to minimize the total value of rejected jobs plus the total energy.Our model combines aspects from advanced energy conservation techniques (namely speed scaling and sleep states) and profit-oriented scheduling models.We show that \emph{rejection-oblivious} schedulers (whose rejection decisions are not based on former decisions) have – in contrast to the model without sleep states – an unbounded competitive ratio.It turns out that the jobs' value densities (the ratio between a job's value and its work) are crucial for the performance of such schedulers.We give an algorithm whose competitiveness nearly matches the lower bound w.r.t\text{.} the maximum value density.If the maximum value density is not too large, the competitiveness becomes $\alpha^{\alpha}+2e\alpha$.Also, we show that it suffices to restrict the value density of low-value jobs only.Using a technique from \cite{Chan:2010} we transfer our results to processors with a fixed maximum speed.},
author = {Cord-Landwehr, Andreas and Kling, Peter and Mallmann-Trenn, Frederik},
booktitle = {Proceedings of the 1st Mediterranean Conference on Algorithms (MedAlg)},
editor = {Even, Guy and Rawitz, Dror},
pages = {218--231},
title = {{Slow Down & Sleep for Profit in Online Deadline Scheduling}},
doi = {10.1007/978-3-642-34862-4_17},
year = {2012},
}
@inproceedings{636,
abstract = {We consider an online facility location problem where clients arrive over time and their demands have to be served by opening facilities and assigning the clients to opened facilities. When opening a facility we must choose one of $K$ different lease types to use. A lease type $k$ has a certain lease length $l_k$. Opening a facility $i$ using lease type $k$ causes a cost of $f_i^k$ and ensures that $i$ is open for the next $l_k$ time steps. In addition to costs for opening facilities, we have to take connection costs $c_{ij}$ into account when assigning a client $j$ to facility $i$. We develop and analyze the first online algorithm for this problem that has a time-independent competitive factor. This variant of the online facility location problem was introduced by Nagarajan and Williamson [7] and is strongly related to both the online facility problem by Meyerson [5] and the parking permit problem by Meyerson [6]. Nagarajan and Williamson gave a 3-approximation algorithm for the offline problem and an $O(K \log n)$-competitive algorithm for the online variant. Here, $n$ denotes the total number of clients arriving over time. We extend their result by removing the dependency on $n$ (and thereby on the time). In general, our algorithm is $O(l_{\max} \log(l_{\max}))$-competitive. Here $l_{\max}$ denotes the maximum lease length. Moreover, we prove that it is $O(\log^2(l_{\max}))$-competitive for many “natural” cases. Such cases include, for example, situations where the number of clients arriving in each time step does not vary too much, or is non-increasing, or is polynomially bounded in $l_{\max}$.},
author = {Meyer auf der Heide, Friedhelm and Pietrzyk, Peter and Kling, Peter},
booktitle = {Proceedings of the 19th International Colloquium on Structural Information \& Communication Complexity (SIROCCO)},
pages = {61--72},
title = {{An Algorithm for Facility Leasing}},
doi = {10.1007/978-3-642-31104-8_6},
year = {2012},
}