@inproceedings{46364,
  abstract     = {Automated algorithm configuration procedures play an increasingly important role in the development and application of algorithms for a wide range of computationally challenging problems. Until very recently, these configuration procedures were limited to optimising a single performance objective, such as the running time or solution quality achieved by the algorithm being configured. However, in many applications there is more than one performance objective of interest. This gives rise to the multi-objective automatic algorithm configuration problem, which involves finding a Pareto set of configurations of a given target algorithm that characterises trade-offs between multiple performance objectives. In this work, we introduce MO-ParamILS, a multi-objective extension of the state-of-the-art single-objective algorithm configuration framework ParamILS, and demonstrate that it produces good results on several challenging bi-objective algorithm configuration scenarios compared to a base-line obtained from using a state-of-the-art single-objective algorithm configurator.},
  author       = {Blot, A and Hoos, H and Jourdan, L and Marmion, M and Trautmann, Heike},
  booktitle    = {LION 2016: Learning and Intelligent Optimization},
  editor       = {Vanschoren, Joaquin and others},
  pages        = {32--47},
  publisher    = {Springer International Publishing},
  title        = {{MO-ParamILS: A Multi-objective Automatic Algorithm Configuration Framework}},
  doi          = {10.1007/978-3-319-50349-3_3},
  volume       = {10079},
  year         = {2016},
}

@incollection{46363,
  abstract     = {The averaged Hausdorff distance has been proposed as an indicator for assessing the quality of finitely sized approximations of the Pareto front of a multiobjective problem. Since many set-based, iterative optimization algorithms store their currently best approximation in an internal archive these approximations are also termed archives. In case of two objectives and continuous variables it is known that the best approximations in terms of averaged Hausdorff distance are subsets of the Pareto front if it is concave. If it is linear or circularly concave the points of the best approximation are equally spaced.

Here, it is proven that the optimal averaged Hausdorff approximation and the Pareto front have an empty intersection if the Pareto front is circularly convex. But the points of the best approximation are equally spaced and they rapidly approach the Pareto front for increasing size of the approximation.},
  author       = {Rudolph, G and Schütze, O and Trautmann, Heike},
  booktitle    = {Applications of Evolutionary Computation: 19th European Conference, EvoApplications 2016, Porto, Portugal, March 30 -- April 1, 2016, Proceedings, Part II},
  editor       = {Squillero, G and Burelli, P},
  isbn         = {978-3-319-31153-1},
  pages        = {42--55},
  publisher    = {Springer International Publishing},
  title        = {{On the Closest Averaged Hausdorff Archive for a Circularly Convex Pareto Front}},
  doi          = {10.1007/978-3-319-31153-1_4},
  year         = {2016},
}

@inproceedings{46369,
  abstract     = {This paper formally defines multimodality in multiobjective optimization (MO). We introduce a test-bed in which multimodal MO problems with known properties can be constructed as well as numerical characteristics of the resulting landscape. Gradient- and local search based strategies are compared on exemplary problems together with specific performance indicators in the multimodal MO setting. By this means the foundation for Exploratory Landscape Analysis in MO is provided.},
  author       = {Kerschke, Pascal and Wang, Hao and Preuss, Mike and Grimme, Christian and Deutz, André and Trautmann, Heike and Emmerich, Michael},
  booktitle    = {Proceedings of the 14th International Conference on Parallel Problem Solving from Nature (PPSN XIV)},
  pages        = {962--972},
  publisher    = {Springer},
  title        = {{Towards Analyzing Multimodality of Multiobjective Landscapes}},
  doi          = {10.1007/978-3-319-45823-6_90},
  year         = {2016},
}

@inproceedings{46367,
  abstract     = {When selecting the best suited algorithm for an unknown optimization problem, it is useful to possess some a priori knowledge of the problem at hand. In the context of single-objective, continuous optimization problems such knowledge can be retrieved by means of Exploratory Landscape Analysis (ELA), which automatically identifies properties of a landscape, e.g., the so-called funnel structures, based on an initial sample. In this paper, we extract the relevant features (for detecting funnels) out of a large set of landscape features when only given a small initial sample consisting of 50 x D observations, where D is the number of decision space dimensions. This is already in the range of the start population sizes of many evolutionary algorithms. The new Multiple Peaks Model Generator (MPM2) is used for training the classifier, and the approach is then very successfully validated on the Black-Box Optimization Benchmark (BBOB) and a subset of the CEC 2013 niching competition problems.},
  author       = {Kerschke, Pascal and Preuss, Mike and Wessing, Simon and Trautmann, Heike},
  booktitle    = {Proceedings of the 18th Annual Conference on Genetic and Evolutionary Computation},
  isbn         = {978-1-4503-4206-3},
  pages        = {229--236},
  publisher    = {ACM},
  title        = {{Low-Budget Exploratory Landscape Analysis on Multiple Peaks Models}},
  doi          = {10.1145/2908812.2908845},
  year         = {2016},
}

@article{46371,
  abstract     = {One main task in evolutionary multiobjective optimization (EMO) is to obtain a suitable finite size approximation of the Pareto front which is the image of the solution set, termed the Pareto set, of a given multiobjective optimization problem. In the technical literature, the characteristic of the desired approximation is commonly expressed by closeness to the Pareto front and a sufficient spread of the solutions obtained. In this paper, we first make an effort to show by theoretical and empirical findings that the recently proposed Averaged Hausdorff (or $\Delta_p$-) indicator indeed aims at fulfilling both performance criteria for bi-objective optimization problems. In the second part of this paper, standard EMO algorithms combined with a specialized archiver and a postprocessing step based on the $\Delta_p$ indicator are introduced which sufficiently approximate the $\Delta_p$-optimal archives and generate solutions evenly spread along the Pareto front.},
  author       = {Rudolph, G and Schütze, O and Grimme, C and Domínguez-Medina, C and Trautmann, Heike},
  journal      = {Computational Optimization and Applications},
  number       = {2},
  pages        = {589--618},
  title        = {{Optimal averaged Hausdorff archives for bi-objective problems: theoretical and numerical results}},
  doi          = {10.1007/s10589-015-9815-8},
  volume       = {64},
  year         = {2016},
}

@article{46372,
  abstract     = {We present a new hybrid evolutionary algorithm for the effective hypervolume approximation of the Pareto front of a given differentiable multi-objective optimization problem. Starting point for the local search (LS) mechanism is a new division of the decision space as we will argue that in each of these regions a different LS strategy seems to be most promising. For the LS in two out of the three regions we will utilize and adapt the Directed Search method which is capable of steering the search into any direction given in objective space and which is thus well suited for the problem at hand. We further on integrate the resulting LS mechanism into SMS-EMOA, a state-of-the-art evolutionary algorithm for hypervolume approximations. Finally, we will present some numerical results on several benchmark problems with two and three objectives indicating the strength and competitiveness of the novel hybrid.},
  author       = {Schütze, O and Sosa Hernandez, V. A. and Trautmann, Heike and Rudolph, G},
  journal      = {Journal of Heuristics},
  number       = {3},
  pages        = {273--300},
  title        = {{The Hypervolume based Directed Search Method for Multi-Objective Optimization Problems}},
  doi          = {10.1007/s10732-016-9310-0},
  volume       = {22},
  year         = {2016},
}

@inproceedings{46368,
  abstract     = {Exploratory Landscape Analysis (ELA) aims at understanding characteristics of single-objective continuous (black-box) optimization problems in an automated way. Moreover, the approach provides the basis for constructing algorithm selection models for unseen problem instances. Recently, it has gained increasing attention and numerical features have been designed by various research groups. This paper introduces the R-Package FLACCO which makes all relevant features available in a unified framework together with efficient helper functions. Moreover, a case study which gives perspectives to ELA for multi-objective optimization problems is presented.},
  author       = {Kerschke, Pascal and Trautmann, Heike},
  booktitle    = {Proceedings of the IEEE Congress on Evolutionary Computation (CEC)},
  publisher    = {IEEE},
  title        = {{The R-Package FLACCO for Exploratory Landscape Analysis with Applications to Multi-Objective Optimization Problems}},
  doi          = {10.1109/CEC.2016.7748359},
  year         = {2016},
}

@article{46370,
  abstract     = {This report documents the talks and discussions at the Dagstuhl Seminar 15211 "Theory of Evolutionary Algorithms". This seminar, now in its 8th edition, is the main meeting point of the highly active theory of randomized search heuristics subcommunities in Australia, Asia, North America, and Europe. Topics intensively discussed include rigorous runtime analysis and computational complexity theory for randomised search heuristics, information geometry of randomised search, and synergies between the theory of evolutionary algorithms and theories of natural evolution.},
  author       = {Neumann, F and Trautmann, Heike},
  journal      = {Dagstuhl Reports},
  number       = {5},
  pages        = {78--79},
  title        = {{Working Group Report: Bridging the Gap Between Experiments and Theory Using Feature-Based Run-Time Analysis; Theory of Evolutionary Algorithms (Dagstuhl Seminar 15211)}},
  doi          = {10.4230/DagRep.5.5.57},
  volume       = {5},
  year         = {2016},
}

@inproceedings{48873,
  abstract     = {Despite the intrinsic hardness of the Traveling Salesperson Problem (TSP) heuristic solvers, e.g., LKH+restart and EAX+restart, are remarkably successful in generating satisfactory or even optimal solutions. However, the reasons for their success are not yet fully understood. Recent approaches take an analytical viewpoint and try to identify instance features, which make an instance hard or easy to solve. We contribute to this area by generating instance sets for couples of TSP algorithms A and B by maximizing/minimizing their performance difference in order to generate instances which are easier to solve for one solver and much harder to solve for the other. This instance set offers the potential to identify key features which allow to distinguish between the problem hardness classes of both algorithms.},
  author       = {Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Learning and Intelligent Optimization},
  editor       = {Festa, Paola and Sellmann, Meinolf and Vanschoren, Joaquin},
  isbn         = {978-3-319-50349-3},
  keywords     = {Algorithm selection, Feature selection, Instance hardness, TSP},
  pages        = {48--59},
  publisher    = {Springer International Publishing},
  title        = {{Evolving Instances for Maximizing Performance Differences of State-of-the-Art Inexact TSP Solvers}},
  doi          = {10.1007/978-3-319-50349-3_4},
  internal-note = {NOTE(review): same title, DOI, and pages as entry 46365 -- likely duplicates; verify and merge, keeping one key},
  year         = {2016},
}

@inproceedings{48874,
  abstract     = {State of the Art inexact solvers of the NP-hard Traveling Salesperson Problem TSP are known to mostly yield high-quality solutions in reasonable computation times. With the purpose of understanding different levels of instance difficulties, instances for the current State of the Art heuristic TSP solvers LKH+restart and EAX+restart are presented which are evolved using a sophisticated evolutionary algorithm. More specifically, the performance differences of the respective solvers are maximized resulting in instances which are easier to solve for one solver and much more difficult for the other. Focusing on both optimization directions, instance features are identified which characterize both types of instances and increase the understanding of solver performance differences.},
  author       = {Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Proceedings of the XV International Conference of the Italian Association for Artificial Intelligence on Advances in Artificial Intelligence - Volume 10037},
  isbn         = {978-3-319-49129-5},
  keywords     = {Combinatorial optimization, Instance hardness, Metaheuristics, Transportation, TSP},
  pages        = {3--12},
  publisher    = {Springer-Verlag},
  title        = {{Understanding Characteristics of Evolved Instances for State-of-the-Art Inexact TSP Solvers with Maximum Performance Difference}},
  doi          = {10.1007/978-3-319-49130-1_1},
  internal-note = {NOTE(review): same title, DOI, and pages as entry 46366 -- likely duplicates; verify and merge, keeping one key},
  year         = {2016},
}

@inproceedings{46365,
  abstract     = {Despite the intrinsic hardness of the Traveling Salesperson Problem (TSP) heuristic solvers, e.g., LKH+restart and EAX+restart, are remarkably successful in generating satisfactory or even optimal solutions. However, the reasons for their success are not yet fully understood. Recent approaches take an analytical viewpoint and try to identify instance features, which make an instance hard or easy to solve. We contribute to this area by generating instance sets for couples of TSP algorithms A and B by maximizing/minimizing their performance difference in order to generate instances which are easier to solve for one solver and much harder to solve for the other. This instance set offers the potential to identify key features which allow to distinguish between the problem hardness classes of both algorithms.},
  author       = {Bossek, Jakob and Trautmann, Heike},
  booktitle    = {Learning and Intelligent Optimization},
  editor       = {Festa, P and Sellmann, M and Vanschoren, J},
  isbn         = {978-3-319-50348-6},
  pages        = {48--59},
  publisher    = {Springer International Publishing},
  title        = {{Evolving Instances for Maximizing Performance Differences of State-of-the-Art Inexact TSP Solvers}},
  doi          = {10.1007/978-3-319-50349-3_4},
  volume       = {10079},
  internal-note = {NOTE(review): same title, DOI, and pages as entry 48873 -- likely duplicates; verify and merge, keeping one key},
  year         = {2016},
}

@inproceedings{46366,
  abstract     = {State of the Art inexact solvers of the NP-hard Traveling Salesperson Problem (TSP) are known to mostly yield high-quality solutions in reasonable computation times. With the purpose of understanding different levels of instance difficulties, instances for the current State of the Art heuristic TSP solvers LKH+restart and EAX+restart are presented which are evolved using a sophisticated evolutionary algorithm. More specifically, the performance differences of the respective solvers are maximized resulting in instances which are easier to solve for one solver and much more difficult for the other. Focusing on both optimization directions, instance features are identified which characterize both types of instances and increase the understanding of solver performance differences.},
  author       = {Bossek, Jakob and Trautmann, Heike},
  booktitle    = {AI*IA 2016 Advances in Artificial Intelligence},
  editor       = {Adorni, G and Cagnoni, S and Gori, M and Maratea, M},
  isbn         = {978-3-319-49129-5},
  pages        = {3--12},
  publisher    = {Springer},
  title        = {{Understanding Characteristics of Evolved Instances for State-of-the-Art Inexact TSP Solvers with Maximum Performance Difference}},
  doi          = {10.1007/978-3-319-49130-1_1},
  volume       = {10037},
  internal-note = {NOTE(review): same title, DOI, and pages as entry 48874 -- likely duplicates; verify and merge, keeping one key},
  year         = {2016},
}

@inproceedings{46373,
  abstract     = {The need for automatic methods of topic discovery in the Internet grows exponentially with the amount of available textual information. Nowadays it becomes impossible to manually read even a small part of the information in order to reveal the underlying topics. Social media provide us with a great pool of user generated content, where topic discovery may be extremely useful for businesses, politicians, researchers, and other stakeholders. However, conventional topic discovery methods, which are widely used in large text corpora, face several challenges when they are applied in social media and particularly in Twitter -- the most popular microblogging platform. To the best of our knowledge no comprehensive overview of these challenges and of the methods dedicated to address these challenges does exist in IS literature until now. Therefore, this paper provides an overview of these challenges, matching methods and their expected usefulness for social media analytics.},
  author       = {Chinnov, Andrey and Kerschke, Pascal and Meske, Christian and Stieglitz, Stefan and Trautmann, Heike},
  booktitle    = {Proceedings of the 20th Americas Conference on Information Systems (AMCIS '15)},
  isbn         = {978-0-9966831-0-4},
  pages        = {1--10},
  title        = {{An Overview of Topic Discovery in Twitter Communication through Social Media Analytics}},
  internal-note = {NOTE(review): ordinal "20th" vs. "AMCIS '15" / year 2015 look inconsistent -- verify conference edition},
  year         = {2015},
}

@inproceedings{46375,
  abstract     = {In single-objective optimization different optimization strategies exist depending on the structure and characteristics of the underlying problem. In particular, the presence of so-called funnels in multimodal problems offers the possibility of applying techniques exploiting the global structure of the function. The recently proposed Exploratory Landscape Analysis approach automatically identifies problem characteristics based on a moderately small initial sample of the objective function and proved to be effective for algorithm selection problems in continuous black-box optimization. In this paper, specific features for detecting funnel structures are introduced and combined with the existing ones in order to classify optimization problems regarding the funnel property. The effectiveness of the approach is shown by experiments on specifically generated test instances and validation experiments on standard benchmark problems.},
  author       = {Kerschke, Pascal and Preuss, Mike and Wessing, Simon and Trautmann, Heike},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference (GECCO '15)},
  editor       = {Silva, Sara},
  isbn         = {978-1-4503-3472-3},
  pages        = {265--272},
  publisher    = {ACM},
  title        = {{Detecting Funnel Structures by Means of Exploratory Landscape Analysis}},
  doi          = {10.1145/2739480.2754642},
  year         = {2015},
}

@inproceedings{46376,
  abstract     = {We investigate per-instance algorithm selection techniques for solving the Travelling Salesman Problem (TSP), based on the two state-of-the-art inexact TSP solvers, LKH and EAX. Our comprehensive experiments demonstrate that the solvers exhibit complementary performance across a diverse set of instances, and the potential for improving the state of the art by selecting between them is significant. Using TSP features from the literature as well as a set of novel features, we show that we can capitalise on this potential by building an efficient selector that achieves significant performance improvements in practice. Our selectors represent a significant improvement in the state-of-the-art in inexact TSP solving, and hence in the ability to find optimal solutions (without proof of optimality) for challenging TSP instances in practice.},
  author       = {Kotthoff, Lars and Kerschke, Pascal and Hoos, Holger and Trautmann, Heike},
  booktitle    = {Learning and Intelligent Optimization},
  editor       = {Dhaenens, Clarisse and Jourdan, Laetitia and Marmion, Marie-Eléonore},
  isbn         = {978-3-319-19084-6},
  pages        = {202--217},
  publisher    = {Springer International Publishing},
  title        = {{Improving the State of the Art in Inexact TSP Solving Using Per-Instance Algorithm Selection}},
  year         = {2015},
}

@article{46379,
  abstract     = {In multiobjective optimization, set-based performance indicators are commonly used to assess the quality of a Pareto front approximation. Based on the scalarization obtained by these indicators, a performance comparison of multiobjective optimization algorithms becomes possible. The R2 and the Hypervolume (HV) indicator represent two recommended approaches which have shown a correlated behavior in recent empirical studies. Whereas the HV indicator has been comprehensively analyzed in the last years, almost no studies on the R2 indicator exist. In this extended version of our previous conference paper, we thus perform a comprehensive investigation of the properties of the R2 indicator in a theoretical and empirical way. The influence of the number and distribution of the weight vectors on the optimal distribution of $\mu$ solutions is analyzed. Based on a comparative analysis, specific characteristics and differences of the R2 and HV indicator are presented. Furthermore, the R2 indicator is integrated into an indicator-based steady-state evolutionary multiobjective optimization algorithm (EMOA). It is shown that the so-called R2-EMOA can accurately approximate the optimal distribution of $\mu$ solutions regarding R2.},
  author       = {Brockhoff, D and Wagner, T and Trautmann, Heike},
  journal      = {Evolutionary Computation},
  number       = {3},
  pages        = {369--395},
  title        = {{R2 Indicator Based Multiobjective Search}},
  doi          = {10.1162/EVCO_a_00135},
  volume       = {23},
  year         = {2015},
}

@inproceedings{46374,
  abstract     = {We consider a routing problem for a single vehicle serving customer locations in the course of time. A subset of these customers must necessarily be served, while the complement of this subset contains dynamic customers which request for service over time, and which do not necessarily need to be served. The decision maker's conflicting goals are serving as many customers as possible as well as minimizing total travel distance. We solve this bi-objective problem with an evolutionary multi-objective algorithm in order to provide an a-posteriori evaluation tool for enabling decision makers to assess the single objective solution strategies that they actually use in real-time. We present the modifications to be applied to the evolutionary multi-objective algorithm NSGA2 in order to solve the routing problem, we describe a number of real-time single-objective solution strategies, and we finally use the gained efficient trade-off solutions of NSGA2 to exemplarily evaluate the real-time strategies. Our results show that the evolutionary multi-objective approach is well-suited to generate benchmarks for assessing dynamic heuristic strategies. Our findings point into future directions for designing dynamic multi-objective approaches for the vehicle routing problem with time windows.},
  author       = {Grimme, C and Meisel, S and Trautmann, Heike and Rudolph, G and Wölck, M},
  booktitle    = {Proceedings of the European Conference on Information Systems},
  title        = {{Multi-Objective Analysis of Approaches to Dynamic Routing of a Vehicle}},
  year         = {2015},
}

@article{46380,
  abstract     = {We present methods to answer two basic questions that arise when benchmarking optimization algorithms. The first one is: which algorithm is the "best" one? and the second one is: which algorithm should I use for my real-world problem? Both are connected and neither is easy to answer. We present a theoretical framework for designing and analyzing the raw data of such benchmark experiments. This represents a first step in answering the aforementioned questions. The 2009 and 2010 BBOB benchmark results are analyzed by means of this framework and we derive insight regarding the answers to the two questions. Furthermore, we discuss how to properly aggregate rankings from algorithm evaluations on individual problems into a consensus, its theoretical background and which common pitfalls should be avoided. Finally, we address the grouping of test problems into sets with similar optimizer rankings and investigate whether these are reflected by already proposed test problem characteristics, finding that this is not always the case.},
  author       = {Mersmann, O and Preuss, M and Trautmann, Heike and Bischl, B and Weihs, C},
  journal      = {Evolutionary Computation},
  number       = {1},
  pages        = {161--185},
  title        = {{Analyzing the BBOB Results by Means of Benchmarking Concepts}},
  volume       = {23},
  year         = {2015},
}

@inproceedings{48838,
  abstract     = {The majority of algorithms can be controlled or adjusted by parameters. Their values can substantially affect the algorithms' performance. Since the manual exploration of the parameter space is tedious -- even for few parameters -- several automatic procedures for parameter tuning have been proposed. Recent approaches also take into account some characteristic properties of the problem instances, frequently termed instance features. Our contribution is the proposal of a novel concept for feature-based algorithm parameter tuning, which applies an approximating surrogate model for learning the continuous feature-parameter mapping. To accomplish this, we learn a joint model of the algorithm performance based on both the algorithm parameters and the instance features. The required data is gathered using a recently proposed acquisition function for model refinement in surrogate-based optimization: the profile expected improvement. This function provides an avenue for maximizing the information required for the feature-parameter mapping, i.e., the mapping from instance features to the corresponding optimal algorithm parameters. The approach is validated by applying the tuner to exemplary evolutionary algorithms and problems, for which theoretically grounded or heuristically determined feature-parameter mappings are available.},
  author       = {Bossek, Jakob and Bischl, Bernd and Wagner, Tobias and Rudolph, Günter},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference},
  isbn         = {978-1-4503-3472-3},
  keywords     = {evolutionary algorithms, model-based optimization, parameter tuning},
  pages        = {1319--1326},
  publisher    = {Association for Computing Machinery},
  title        = {{Learning Feature-Parameter Mappings for Parameter Tuning via the Profile Expected Improvement}},
  doi          = {10.1145/2739480.2754673},
  year         = {2015},
}

@inproceedings{48887,
  abstract     = {We evaluate the performance of a multi-objective evolutionary algorithm on a class of dynamic routing problems with a single vehicle. In particular we focus on relating algorithmic performance to the most prominent characteristics of problem instances. The routing problem considers two types of customers: mandatory customers must be visited whereas optional customers do not necessarily have to be visited. Moreover, mandatory customers are known prior to the start of the tour whereas optional customers request for service at later points in time with the vehicle already being on its way. The multi-objective optimization problem then results as maximizing the number of visited customers while simultaneously minimizing total travel time. As an a-posteriori evaluation tool, the evolutionary algorithm aims at approximating the related Pareto set for specifically designed benchmarking instances differing in terms of number of customers, geographical layout, fraction of mandatory customers, and request times of optional customers. Conceptional and experimental comparisons to online heuristic procedures are provided.},
  author       = {Meisel, Stephan and Grimme, Christian and Bossek, Jakob and Wölck, Martin and Rudolph, Günter and Trautmann, Heike},
  booktitle    = {Proceedings of the Genetic and Evolutionary Computation Conference},
  isbn         = {978-1-4503-3472-3},
  keywords     = {combinatorial optimization, metaheuristics, multi-objective optimization, online algorithms, transportation},
  pages        = {425--432},
  publisher    = {Association for Computing Machinery},
  title        = {{Evaluation of a Multi-Objective EA on Benchmark Instances for Dynamic Routing of a Vehicle}},
  doi          = {10.1145/2739480.2754705},
  year         = {2015},
}

