@inproceedings{46388,
  abstract     = {{Understanding the behaviour of well-known algorithms for classical NP-hard optimisation problems is still a difficult task. With this paper, we contribute to this research direction and carry out a feature based comparison of local search and the well-known Christofides approximation algorithm for the Traveling Salesperson Problem. We use an evolutionary algorithm approach to construct easy and hard instances for the Christofides algorithm, where we measure hardness in terms of approximation ratio. Our results point out important features and lead to hard and easy instances for this famous algorithm. Furthermore, our cross-comparison gives new insights on the complementary benefits of the different approaches.}},
  author       = {{Nallaperuma, Samadhi and Wagner, Markus and Neumann, Frank and Bischl, Bernd and Mersmann, Olaf and Trautmann, Heike}},
  booktitle    = {{Proceedings of the Twelfth Workshop on Foundations of Genetic Algorithms XII}},
  isbn         = {{9781450319904}},
  keywords     = {{approximation algorithms, local search, traveling salesperson problem, feature selection, prediction, classification}},
  pages        = {{147--160}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{A Feature-Based Comparison of Local Search and the Christofides Algorithm for the Travelling Salesperson Problem}}},
  doi          = {{10.1145/2460239.2460253}},
  year         = {{2013}},
}

@inproceedings{46390,
  abstract     = {{In some technical applications like multiobjective online control an evenly spaced approximation of the Pareto front is desired. Since standard evolutionary multiobjective optimization (EMO) algorithms have not been designed for that kind of approximation we propose an archive-based plug-in method that builds an evenly spaced approximation using averaged Hausdorff measure between archive and reference front. In case of three objectives this reference front is constructed from a triangulated approximation of the Pareto front from a previous experiment. The plug-in can be deployed in online or offline mode for any kind of EMO algorithm.}},
  author       = {{Rudolph, G. and Trautmann, Heike and Sengupta, S. and Schütze, O.}},
  booktitle    = {{Evolutionary Multi-Criterion Optimization -- 7th International Conference, EMO 2013, Sheffield, UK, Proceedings}},
  editor       = {{Purshouse, R. C. and Fleming, P. J. and Fonseca, C. M. and Greco, S. and Shaw, J.}},
  pages        = {{443--458}},
  publisher    = {{Springer}},
  series       = {{Lecture Notes in Computer Science}},
  title        = {{{Evenly Spaced Pareto Front Approximations for Tricriteria Problems Based on Triangulation}}},
  doi          = {{10.1007/978-3-642-37140-0_34}},
  volume       = {{7811}},
  year         = {{2013}},
}

@inproceedings{46391,
  abstract     = {{Indicator based evolutionary algorithms have caught the interest of many researchers for the treatment of multi-objective optimization problems in the recent past since they deliver the desired approximation of the solution set and due to a usually better performance compared to dominance based algorithms. Nevertheless, these methods still suffer the drawback that many function evaluations are required to obtain a suitable representation of the solution set. The aim of this study is to present the Directed Search (DS) Method as local searcher within global indicator based optimization algorithms. For this, we will present the DS in the context of hypervolume maximization leading to both a new local search algorithm and a new memetic algorithm. Further, we will present first attempts to adapt the DS to a class of parameter dependent problems.}},
  author       = {{Sosa-Hernandez, V. A. and Schütze, O. and Rudolph, G. and Trautmann, Heike}},
  booktitle    = {{Proceedings of the Fifteenth Annual Conference Companion on Genetic and Evolutionary Computation Conference Companion}},
  pages        = {{1699--1702}},
  publisher    = {{ACM}},
  title        = {{{Directed Search Method for Indicator-based Multi-objective Evolutionary Algorithms}}},
  doi          = {{10.1145/2464576.2482756}},
  year         = {{2013}},
}

@inproceedings{46387,
  abstract     = {{Here we address the problem of computing finite size Hausdorff approximations of the Pareto front of four-objective optimization problems by means of evolutionary computing. Since many applications desire an approximation evenly spread along the Pareto front and approximations that are good in the Hausdorff sense are typically evenly spread along the Pareto front we consider three different evolutionary multi-objective algorithms tailored to that purpose, where two of them are based on the Part and Selection Algorithm (PSA). Finally, we present some numerical results indicating the strength of the novel methods.}},
  author       = {{Dominguez-Medina, C. and Rudolph, G. and Schütze, O. and Trautmann, Heike}},
  booktitle    = {{Proceedings of the 2013 IEEE Congress on Evolutionary Computation (CEC)}},
  pages        = {{3190--3197}},
  publisher    = {{IEEE}},
  title        = {{{Evenly spaced Pareto fronts of quad-objective problems using PSA partitioning technique}}},
  doi          = {{10.1109/CEC.2013.6557960}},
  year         = {{2013}},
}

@inproceedings{46389,
  abstract     = {{Current StarCraft bots are not very flexible in their strategy choice, most of them just follow a manually optimized one, usually a rush. We suggest a method of augmenting existing bots via Fuzzy Control in order to make them react on the current game situation. According to the available information, the best matching of a pool of strategies is chosen. While the method is very general and can be applied easily to many bots, we implement it for the existing BTHAI bot and show experimentally how the modifications affect its gameplay, and how it is improved compared to the original version.}},
  author       = {{Preuss, Mike and Kozakowski, Daniel and Hagelbäck, Johan and Trautmann, Heike}},
  booktitle    = {{2013 IEEE Conference on Computational Intelligence in Games (CIG)}},
  pages        = {{1--8}},
  publisher    = {{IEEE}},
  title        = {{{Reactive strategy choice in StarCraft by means of Fuzzy Control}}},
  doi          = {{10.1109/CIG.2013.6633627}},
  year         = {{2013}},
}

@article{46395,
  abstract     = {{In multiobjective optimization, the identification of practically relevant solutions on the Pareto-optimal front is an important research topic. Desirability functions (DFs) allow the preferences of the decision maker to be specified in an intuitive way. Recently, it has been shown for continuous optimization problems that an a priori transformation of the objectives by means of DFs can be used to focus the search of a hypervolume-based evolutionary algorithm on the desired part of the front. In many-objective optimization, however, the computational complexity of the hypervolume can become a crucial part. Thus, an alternative to this approach will be presented in this paper. The new algorithm operates in the untransformed objective space, but the desirability index (DI), that is, a DF-based scalarization, will be used as the second-level selection criterion in the non-dominated sorting. The diversity and uniform distribution of the resulting approximation are ensured by the use of an external archive. In the experiments, different preferences are specified as DFs, and their effects are investigated. It is shown that trade-off solutions are generated in the desired regions of the Pareto-optimal front and with a density adaptive to the DI. The efficiency of the approach with respect to increasing objective space dimension is also analysed using scalable test functions. The convergence speed is superior to other set-based and preference-based evolutionary multiobjective algorithms while the approach is of low computational complexity due to cheap DI evaluations. Copyright \textcopyright~2013 John Wiley \& Sons, Ltd.}},
  author       = {{Trautmann, Heike and Wagner, Tobias and Biermann, D. and Weihs, C.}},
  journal      = {{Journal of Multi-Criteria Decision Analysis}},
  number       = {{5-6}},
  pages        = {{319--337}},
  title        = {{{Indicator-based Selection in Evolutionary Multiobjective Optimization Algorithms Based On the Desirability Index}}},
  doi          = {{10.1002/mcda.1503}},
  volume       = {{20}},
  year         = {{2013}},
}

@inproceedings{46393,
  abstract     = {{In multi-objective optimization, set-based performance indicators have become the state of the art for assessing the quality of Pareto front approximations. As a consequence, they are also more and more used within the design of multi-objective optimization algorithms. The R2 and the Hypervolume (HV) indicator represent two popular examples. In order to understand the behavior and the approximations preferred by these indicators and algorithms, a comprehensive knowledge of the indicator’s properties is required. Whereas this knowledge is available for the HV, we presented a first approach in this direction for the R2 indicator just recently. In this paper, we build upon this knowledge and enhance the considerations with respect to the integration of preferences into the R2 indicator. More specifically, we analyze the effect of the reference point, the domain of the weights, and the distribution of weight vectors on the optimization of $\mu$ solutions with respect to the R2 indicator. By means of theoretical findings and empirical evidence, we show the potentials of these three possibilities using the optimal distribution of $\mu$ solutions for exemplary setups.}},
  author       = {{Wagner, Tobias and Trautmann, Heike and Brockhoff, Dimo}},
  booktitle    = {{Evolutionary Multi-Criterion Optimization}},
  editor       = {{Purshouse, Robin C. and Fleming, Peter J. and Fonseca, Carlos M. and Greco, Salvatore and Shaw, Jane}},
  isbn         = {{978-3-642-37140-0}},
  pages        = {{81--95}},
  publisher    = {{Springer Berlin Heidelberg}},
  title        = {{{Preference Articulation by Means of the R2 Indicator}}},
  year         = {{2013}},
}

@inproceedings{46392,
  abstract     = {{An indicator-based evolutionary multiobjective optimization algorithm (EMOA) is introduced which incorporates the contribution to the unary R2-indicator as the secondary selection criterion. First experiments indicate that the R2-EMOA accurately approximates the Pareto front of the considered continuous multiobjective optimization problems. Furthermore, decision makers’ preferences can be included by adjusting the weight vector distributions of the indicator which results in a focused search behavior.}},
  author       = {{Trautmann, Heike and Wagner, Tobias and Brockhoff, Dimo}},
  booktitle    = {{Learning and Intelligent Optimization}},
  editor       = {{Nicosia, Giuseppe and Pardalos, Panos}},
  isbn         = {{978-3-642-44973-4}},
  pages        = {{70--74}},
  publisher    = {{Springer Berlin Heidelberg}},
  title        = {{{R2-EMOA: Focused Multiobjective Search Using R2-Indicator-Based Selection}}},
  year         = {{2013}},
}

@article{48889,
  abstract     = {{Meta-heuristics are frequently used to tackle NP-hard combinatorial optimization problems. With this paper we contribute to the understanding of the success of 2-opt based local search algorithms for solving the traveling salesperson problem (TSP). Although 2-opt is widely used in practice, it is hard to understand its success from a theoretical perspective. We take a statistical approach and examine the features of TSP instances that make the problem either hard or easy to solve. As a measure of problem difficulty for 2-opt we use the approximation ratio that it achieves on a given instance. Our investigations point out important features that make TSP instances hard or easy to be approximated by 2-opt.}},
  author       = {{Mersmann, Olaf and Bischl, Bernd and Trautmann, Heike and Wagner, Markus and Bossek, Jakob and Neumann, Frank}},
  issn         = {{1012-2443}},
  journal      = {{Annals of Mathematics and Artificial Intelligence}},
  keywords     = {{2-opt, 90B06, Classification, Feature selection, MARS, TSP}},
  number       = {{2}},
  pages        = {{151--182}},
  title        = {{{A Novel Feature-Based Approach to Characterize Algorithm Performance for the Traveling Salesperson Problem}}},
  doi          = {{10.1007/s10472-013-9341-2}},
  volume       = {{69}},
  year         = {{2013}},
}

@inproceedings{47161,
  author       = {{Fahl, Sascha and Harbach, Marian and Acar, Yasemin and Smith, Matthew}},
  title        = {{{On the ecological validity of a password study}}},
  booktitle    = {{Proceedings of the Ninth Symposium on Usable Privacy and Security}},
  publisher    = {{ACM}},
  year         = {{2013}},
  doi          = {{10.1145/2501604.2501617}},
}

@article{46394,
  abstract     = {{Meta-heuristics are frequently used to tackle NP-hard combinatorial optimization problems. With this paper we contribute to the understanding of the success of 2-opt based local search algorithms for solving the traveling salesperson problem (TSP). Although 2-opt is widely used in practice, it is hard to understand its success from a theoretical perspective. We take a statistical approach and examine the features of TSP instances that make the problem either hard or easy to solve. As a measure of problem difficulty for 2-opt we use the approximation ratio that it achieves on a given instance. Our investigations point out important features that make TSP instances hard or easy to be approximated by 2-opt.}},
  author       = {{Mersmann, Olaf and Bischl, Bernd and Trautmann, Heike and Wagner, Markus and Bossek, Jakob and Neumann, Frank}},
  journal      = {{Annals of Mathematics and Artificial Intelligence}},
  number       = {{2}},
  pages        = {{151--182}},
  title        = {{{A Novel Feature-Based Approach to Characterize Algorithm Performance for the Traveling Salesperson Problem}}},
  doi          = {{10.1007/s10472-013-9341-2}},
  volume       = {{69}},
  year         = {{2013}},
  internal-note = {{Duplicate of entry 48889 (same article, fully fielded there); consider removing one of the two keys.}},
}

@inproceedings{36919,
  abstract     = {{Faced with increasing demands on energy efficiency, current electronic systems operate according to complex power management schemes including more and more fine-grained voltage frequency scaling and power shutdown scenarios. Consequently, validation of the power design intent should begin as early as possible at electronic system-level (ESL) together with first executable system specifications for integrity tests. However, today's system-level design methodologies usually focus on the abstraction of digital logic and time, so that typical low-power aspects cannot be considered so far. In this paper, we present a high-level modeling approach on top of the SystemC/TLM standard to simulate power distribution and voltage based implications in a "loosely-timed" functional execution context. The approach reuses legacy TLM models and prevents the need for detailed lock-step process synchronization in contrast to existing methods. A case study derived from an open source low-power design demonstrates the efficiency of our approach in terms of simulation performance and testability.}},
  author       = {{Mischkalla, Fabian and Müller, Wolfgang}},
  booktitle    = {{Proceedings of the 23rd International Workshop on Power and Timing Modeling, Optimization and Simulation (PATMOS)}},
  keywords     = {{Time-varying systems, Time-domain analysis, Synchronization, Context modeling, Clocks, Semantics, Standards}},
  publisher    = {{IEEE}},
  title        = {{{Efficient Power-Intent Validation Using "Loosely-Timed" Simulation Models: A Non-Invasive Approach}}},
  doi          = {{10.1109/PATMOS.2013.6662171}},
  year         = {{2013}},
}

@inproceedings{36920,
  abstract     = {{In the electronic system development, energy consumption is clearly becoming one of the most important design concerns. From the system level point of view, Dynamic Power Management (DPM) and Dynamic Voltage and Frequency Scaling (DVFS) are two mostly applied techniques to adjust the tradeoff between the performance and power dissipation at runtime. In this paper, we study the problem of combined application of both techniques with regard to hard real-time systems running on cluster-based multi-core processors. To optimize the processor energy consumption, a heuristic based on simulated annealing with efficient termination criterion is proposed. The experiment results show that the proposed algorithm outperforms the existing approaches in terms of the energy reduction.}},
  author       = {{He, Da and Müller, Wolfgang}},
  booktitle    = {{Proceedings of the International Conference on Applied Computing (AC)}},
  editor       = {{Weghorn, Hans}},
  isbn         = {{978-989-8533-20-3}},
  keywords     = {{Dynamic Power Management, Dynamic Voltage and Frequency Scaling, Hard Real-Time, Multi-core Processor}},
  title        = {{{An Energy-Efficient Heuristic for Hard Real-Time System on Multi-Core Processors}}},
  year         = {{2013}},
}

@phdthesis{8425,
  abstract     = {{This thesis studies three topics in quantum computation and information: The approximability of quantum problems, quantum proof systems, and non-classical correlations in quantum systems. 

In the first area, we demonstrate a polynomial-time (classical) approximation algorithm for dense instances of the canonical QMA-complete quantum constraint satisfaction problem, the local Hamiltonian problem. In the opposite direction, we next introduce a quantum generalization of the polynomial-time hierarchy, and define problems which we prove are not only complete for the second level of this hierarchy, but are in fact hard to approximate. 

In the second area, we study variants of the interesting and stubbornly open question of whether a quantum proof system with multiple unentangled quantum provers is equal in expressive power to a proof system with a single quantum prover. Our results concern classes such as BellQMA(poly), and include a novel proof of perfect parallel repetition for SepQMA(m) based on cone programming duality. 

In the third area, we study non-classical quantum correlations beyond entanglement, often dubbed "non-classicality". Among our results are two novel schemes for quantifying non-classicality: The first proposes the new paradigm of exploiting local unitary operations to study non-classical correlations, and the second introduces a protocol through which non-classical correlations in a starting system can be "activated" into distillable entanglement with an ancilla system. 

An introduction to all required linear algebra and quantum mechanics is included.}},
  author       = {{Gharibian, Sevag}},
  pages        = {{240}},
  school       = {{University of Waterloo}},
  title        = {{{Approximation, Proof Systems, and Correlations in a Quantum World}}},
  year         = {{2013}},
}

@article{8173,
  abstract     = {{We study three variants of multi-prover quantum Merlin-Arthur proof systems. We first show that the class of problems that can be efficiently verified using polynomially many quantum proofs, each of logarithmic-size, is exactly MQA (also known as QCMA), the class of problems which can be efficiently verified via a classical proof and a quantum verifier. We then study the class BellQMA(poly), characterized by a verifier who first applies unentangled, nonadaptive measurements to each of the polynomially many proofs, followed by an arbitrary but efficient quantum verification circuit on the resulting measurement outcomes. We show that if the number of outcomes per nonadaptive measurement is a polynomially-bounded function, then the expressive power of the proof system is exactly QMA. Finally, we study a class equivalent to QMA(m), denoted SepQMA(m), where the verifier's measurement operator corresponding to outcome "accept" is a fully separable operator across the m quantum proofs. Using cone programming duality, we give an alternate proof of a result of Harrow and Montanaro [FOCS, pp. 633--642 (2010)] that shows a perfect parallel repetition theorem for SepQMA(m) for any m.}},
  author       = {{Gharibian, Sevag and Sikora, Jamie and Upadhyay, Sarvagya}},
  journal      = {{Quantum Information \& Computation}},
  number       = {{1-2}},
  pages        = {{135--157}},
  title        = {{{QMA variants with polynomially many provers}}},
  volume       = {{13}},
  year         = {{2013}},
}

@inproceedings{528,
  abstract     = {{Cold-boot attacks exploit the fact that DRAM contents are not immediately lost when a PC is powered off. Instead the contents decay rather slowly, in particular if the DRAM chips are cooled to low temperatures. This effect opens an attack vector on cryptographic applications that keep decrypted keys in DRAM. An attacker with access to the target computer can reboot it or remove the RAM modules and quickly copy the RAM contents to non-volatile memory. By exploiting the known cryptographic structure of the cipher and layout of the key data in memory, in our application an AES key schedule with redundancy, the resulting memory image can be searched for sections that could correspond to decayed cryptographic keys; then, the attacker can attempt to reconstruct the original key. However, the runtime of these algorithms grows rapidly with increasing memory image size, error rate and complexity of the bit error model, which limits the practicability of the approach. In this work, we study how the algorithm for key search can be accelerated with custom computing machines. We present an FPGA-based architecture on a Maxeler dataflow computing system that outperforms a software implementation up to 205x, which significantly improves the practicability of cold-boot attacks against AES.}},
  author       = {{Riebler, Heinrich and Kenter, Tobias and Sorge, Christoph and Plessl, Christian}},
  booktitle    = {{Proceedings of the International Conference on Field-Programmable Technology (FPT)}},
  keywords     = {{coldboot}},
  pages        = {{386--389}},
  publisher    = {{IEEE}},
  title        = {{{FPGA-accelerated Key Search for Cold-Boot Attacks against AES}}},
  doi          = {{10.1109/FPT.2013.6718394}},
  year         = {{2013}},
}

@inproceedings{505,
  abstract     = {{In this paper we introduce “On-The-Fly Computing”, our vision of future IT services that will be provided by assembling modular software components available on world-wide markets. After suitable components have been found, they are automatically integrated, configured and brought to execution in an On-The-Fly Compute Center. We envision that these future compute centers will continue to leverage three current trends in large scale computing which are an increasing amount of parallel processing, a trend to use heterogeneous computing resources, and—in the light of rising energy cost—energy-efficiency as a primary goal in the design and operation of computing systems. In this paper, we point out three research challenges and our current work in these areas.}},
  author       = {{Happe, Markus and Kling, Peter and Plessl, Christian and Platzner, Marco and Meyer auf der Heide, Friedhelm}},
  booktitle    = {{Proceedings of the 9th IEEE Workshop on Software Technology for Future Embedded and Ubiquitous Systems (SEUS)}},
  publisher    = {{IEEE}},
  title        = {{{On-The-Fly Computing: A Novel Paradigm for Individualized IT Services}}},
  doi          = {{10.1109/ISORC.2013.6913232}},
  year         = {{2013}},
}

@inproceedings{1787,
  author       = {{Suess, Tim and Schoenrock, Andrew and Meisner, Sebastian and Plessl, Christian}},
  booktitle    = {{Proceedings of the International Symposium on Parallel and Distributed Processing Workshops (IPDPSW)}},
  isbn         = {{978-0-7695-4979-8}},
  pages        = {{64--73}},
  publisher    = {{IEEE Computer Society}},
  title        = {{{Parallel Macro Pipelining on the Intel SCC Many-Core Computer}}},
  doi          = {{10.1109/IPDPSW.2013.136}},
  year         = {{2013}},
}

@article{15180,
  author       = {{Owen, G. Scott and Domik, Gitta and Ebert, David S. and Kohlhammer, Jörn and Rushmeier, Holly and Sousa Santos, Beatriz and Weiskopf, Daniel}},
  journal      = {{IEEE Computer Graphics and Applications}},
  number       = {{4}},
  pages        = {{14--19}},
  publisher    = {{IEEE}},
  title        = {{{How visualization courses have changed over the past 10 years}}},
  doi          = {{10.1109/MCG.2013.57}},
  volume       = {{33}},
  year         = {{2013}},
}

@article{60453,
  abstract     = {{The most popular and actively researched class of quad remeshing techniques is the family of parametrization based quad meshing methods. They all strive to generate an integer-grid map, i.e. a parametrization of the input surface into $R^2$ such that the canonical grid of integer iso-lines forms a quad mesh when mapped back onto the surface in $R^3$. An essential, albeit broadly neglected aspect of these methods is the quad extraction step, i.e. the materialization of an actual quad mesh from the mere "quad texture". Quad (mesh) extraction is often believed to be a trivial matter but quite the opposite is true: numerous special cases, ambiguities induced by numerical inaccuracies and limited solver precision, as well as imperfections in the maps produced by most methods (unless costly countermeasures are taken) pose significant challenges to the quad extractor. We present a method to sanitize a provided parametrization such that it becomes numerically consistent even in a limited precision floating point representation. Based on this we are able to provide a comprehensive and sound description of how to perform quad extraction robustly and without the need for any complex tolerance thresholds or disambiguation rules. On top of that we develop a novel strategy to cope with common local fold-overs in the parametrization. This allows our method, dubbed QEx, to generate all-quadrilateral meshes where otherwise holes, non-quad polygons or no output at all would have been produced. We thus enable the practical use of an entire class of maps that was previously considered defective. Since state of the art quad meshing methods spend a significant share of their run time solely to prevent local fold-overs, using our method it is now possible to obtain quad meshes significantly quicker than before. We also provide libQEx, an open source C++ reference implementation of our method and thus significantly lower the bar to enter the field of quad meshing.}},
  author       = {{Ebke, Hans-Christian and Bommes, David and Campen, Marcel and Kobbelt, Leif}},
  issn         = {{0730-0301}},
  journal      = {{ACM Transactions on Graphics}},
  number       = {{6}},
  pages        = {{1--10}},
  publisher    = {{Association for Computing Machinery (ACM)}},
  title        = {{{QEx: Robust Quad Mesh Extraction}}},
  doi          = {{10.1145/2508363.2508372}},
  volume       = {{32}},
  year         = {{2013}},
}

