@inproceedings{3119,
  author       = {{Hofheinz, Dennis and Jager, Tibor}},
  booktitle    = {{Theory of Cryptography - 13th International Conference, TCC 2016-A, Tel Aviv, Israel, January 10-13, 2016, Proceedings, Part I}},
  pages        = {{336--362}},
  title        = {{{Verifiable Random Functions from Standard Assumptions}}},
  doi          = {{10.1007/978-3-662-49096-9_14}},
  year         = {{2016}},
}

@inproceedings{3157,
  author       = {{Beringer, Steffen and Wehrheim, Heike}},
  booktitle    = {{Critical Systems: Formal Methods and Automated Verification - Joint 21st International Workshop on Formal Methods for Industrial Critical Systems and 16th International Workshop on Automated Verification of Critical Systems, FMICS-AVoCS 2016, Pisa, Italy, September 26-28, 2016, Proceedings}},
  editor       = {{ter Beek, Maurice H. and Gnesi, Stefania and Knapp, Alexander}},
  pages        = {{189--204}},
  title        = {{{Verification of AUTOSAR Software Architectures with Timed Automata}}},
  doi          = {{10.1007/978-3-319-45943-1_13}},
  year         = {{2016}},
}

@inproceedings{3158,
  author       = {{Travkin, Oleg and Wehrheim, Heike}},
  booktitle    = {{Theoretical Aspects of Computing - {ICTAC} 2016 - 13th International Colloquium, Taipei, Taiwan, ROC, October 24-31, 2016, Proceedings}},
  editor       = {{Sampaio, Augusto and Wang, Farn}},
  pages        = {{3--24}},
  title        = {{{Verification of Concurrent Programs on Weak Memory Models}}},
  doi          = {{10.1007/978-3-319-46750-4_1}},
  year         = {{2016}},
}

@inproceedings{3159,
  author       = {{Schellhorn, Gerhard and Travkin, Oleg and Wehrheim, Heike}},
  booktitle    = {{Integrated Formal Methods - 12th International Conference, {IFM} 2016, Reykjavik, Iceland, June 1-5, 2016, Proceedings}},
  editor       = {{Huisman, Marieke}},
  pages        = {{193--209}},
  title        = {{{Towards a Thread-Local Proof Technique for Starvation Freedom}}},
  doi          = {{10.1007/978-3-319-33693-0_13}},
  year         = {{2016}},
}

@inproceedings{3160,
  author       = {{Doherty, Simon and Dongol, Brijesh and Derrick, John and Schellhorn, Gerhard and Wehrheim, Heike}},
  booktitle    = {{20th International Conference on Principles of Distributed Systems, {OPODIS} 2016, December 13-16, 2016, Madrid, Spain}},
  editor       = {{Fatourou, Panagiota and Jim{\'{e}}nez, Ernesto and Pedone, Fernando}},
  pages        = {{35:1--35:17}},
  title        = {{{Proving Opacity of a Pessimistic {STM}}}},
  doi          = {{10.4230/LIPIcs.OPODIS.2016.35}},
  year         = {{2016}},
}

@article{3161,
  author       = {{Isenberg, Tobias and Jakobs, Marie{-}Christine and Pauck, Felix and Wehrheim, Heike}},
  journal      = {{CoRR}},
  title        = {{{Deriving approximation tolerance constraints from verification runs}}},
  year         = {{2016}},
}

@misc{210,
  author       = {{Leder, Lennart}},
  publisher    = {{Universit{\"a}t Paderborn}},
  title        = {{{Congestion Games with Mixed Objectives}}},
  year         = {{2016}},
}

@misc{213,
  author       = {{Porzenheim, Laurens}},
  publisher    = {{Universit{\"a}t Paderborn}},
  title        = {{{Comparison of different Definitions of Chosen-Ciphertext Security in Encryption schemes}}},
  year         = {{2016}},
}

@misc{214,
  author       = {{Bemmann, Kai S{\"o}ren}},
  publisher    = {{Universit{\"a}t Paderborn}},
  title        = {{{Commitment Schemes - Definitions, Variants, and Security}}},
  year         = {{2016}},
}

@inproceedings{215,
  abstract     = {{We present three robust overlay networks: First, we present a network that organizes the nodes into an expander and is resistant to even massive adversarial churn. Second, we develop a network based on the hypercube that maintains connectivity under adversarial DoS-attacks. For the DoS-attacks we use the notion of a Omega(log log n)-late adversary which only has access to topological information that is at least Omega(log log n) rounds old. Finally, we develop a network that combines both churn- and DoS-resistance. The networks gain their robustness through constant network reconfiguration, i.e., the topology of the networks changes constantly. Our reconguration algorithms are based on node sampling primitives for expanders and hypercubes that allow each node to sample a logarithmic number of nodes uniformly at random in O(log log n) communication rounds. These primitives are specific to overlay networks and their optimal runtime represents an exponential improvement over known techniques. Our results have a wide range of applications, for example in the area of scalable and robust peer-to-peer systems.}},
  author       = {{Drees, Maximilian and Gmyr, Robert and Scheideler, Christian}},
  booktitle    = {{Proceedings of the 28th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)}},
  pages        = {{417--427}},
  title        = {{{Churn- and DoS-resistant Overlay Networks Based on Network Reconfiguration}}},
  doi          = {{10.1145/2935764.2935783}},
  year         = {{2016}},
}

@article{175,
  abstract     = {{Today, service compositions often need to be assembled or changed on-the-fly, which leaves only little time for quality assurance. Moreover, quality assurance is complicated by service providers only giving information on their services in terms of domain specific concepts with only limited semantic meaning.In this paper, we propose a method for constructing service compositions based on pre-verified templates. Templates, given as workflow descriptions, are typed over a (domain-independent) template ontology defining concepts and predicates. Their meaning is defined by an abstract semantics, leaving the specific meaning of ontology concepts open, however, only up to given ontology rules. Templates are proven correct using a Hoare-style proof calculus, extended by a specific rule for service calls. Construction of service compositions amounts to instantiation of templates with domain-specific services. Correctness of an instantiation can then simply be checked by verifying that the domain ontology (a) adheres to the rules of the template ontology, and (b) fulfills the constraints of the employed template.}},
  author       = {{Walther, Sven and Wehrheim, Heike}},
  journal      = {{Science of Computer Programming}},
  pages        = {{2--23}},
  publisher    = {{Elsevier}},
  title        = {{{On-The-Fly Construction of Provably Correct Service Compositions - Templates and Proofs}}},
  doi          = {{10.1016/j.scico.2016.04.002}},
  year         = {{2016}},
}

@inproceedings{17655,
  author       = {{Polevoy, Gleb and de Weerdt, Mathijs and Jonker, Catholijn}},
  booktitle    = {{Proceedings of the 2016 European Conference on Artificial Intelligence}},
  keywords     = {{agents, action, repeated reciprocation, fixed, floating, network, Nash equilibrium, social welfare, price of anarchy, price of stability, convex combination}},
  pages        = {{417--425}},
  title        = {{{The Game of Reciprocation Habits}}},
  doi          = {{10.3233/978-1-61499-672-9-417}},
  volume       = {{285}},
  year         = {{2016}},
}

@inproceedings{17656,
  author       = {{Polevoy, Gleb and de Weerdt, Mathijs and Jonker, Catholijn}},
  booktitle    = {{Proceedings of the 2016 International Conference on Autonomous Agents and Multiagent Systems}},
  isbn         = {{978-1-4503-4239-1}},
  keywords     = {{agent's influence, behavior, convergence, perron-frobenius, reciprocal interaction, repeated reciprocation}},
  pages        = {{1431--1432}},
  publisher    = {{International Foundation for Autonomous Agents and Multiagent Systems}},
  title        = {{{The Convergence of Reciprocation}}},
  year         = {{2016}},
}

@inproceedings{177,
  abstract     = {{Efficiently parallelizable parameterized problems have been classified as being either in the class FPP (fixed-parameter parallelizable) or the class PNC (parameterized analog of NC), which contains FPP as a subclass. In this paper, we propose a more restrictive class of parallelizable parameterized problems called fixed-parameter parallel-tractable (FPPT). For a problem to be in FPPT, it should possess an efficient parallel algorithm not only from a theoretical standpoint but in practice as well. The primary distinction between FPPT and FPP is the parallel processor utilization, which is bounded by a polynomial function in the case of FPPT. We initiate the study of FPPT with the well-known k-vertex cover problem. In particular, we present a parallel algorithm that outperforms the best known parallel algorithm for this problem: using O(m) instead of O(n2) parallel processors, the running time improves from 4logn+O(kk) to O(k⋅log3n), where m is the number of edges, n is the number of vertices of the input graph, and k is an upper bound of the size of the sought vertex cover. We also note that a few P-complete problems fall into FPPT including the monotone circuit value problem (MCV) when the underlying graphs are bounded by a constant Euler genus.}},
  author       = {{Abu-Khzam, Faisal N. and Li, Shouwei and Markarian, Christine and Meyer auf der Heide, Friedhelm and Podlipyan, Pavel}},
  booktitle    = {{Proceedings of the 10th International Conference on Combinatorial Optimization and Applications (COCOA)}},
  pages        = {{477--488}},
  title        = {{{On the Parameterized Parallel Complexity and the Vertex Cover Problem}}},
  doi          = {{10.1007/978-3-319-48749-6_35}},
  year         = {{2016}},
}

@misc{187,
  booktitle    = {{Transactions on Parallel Computing (TOPC)}},
  editor       = {{Meyer auf der Heide, Friedhelm}},
  number       = {{1}},
  pages        = {{1}},
  title        = {{{Introduction to the Special Issue on SPAA 2014}}},
  doi          = {{10.1145/2936716}},
  year         = {{2016}},
}

@inproceedings{19,
  abstract     = {{Version Control Systems (VCS) are a valuable tool for software development
and document management. Both client/server and distributed (Peer-to-Peer)
models exist, with the latter (e.g., Git and Mercurial) becoming
increasingly popular. Their distributed nature introduces complications,
especially concerning security: it is hard to control the dissemination of
contents stored in distributed VCS as they rely on replication of complete
repositories to any involved user.

We overcome this issue by designing and implementing a concept for
cryptography-enforced access control which is transparent to the user. Use
of field-tested schemes (end-to-end encryption, digital signatures) allows
for strong security, while adoption of convergent encryption and
content-defined chunking retains storage efficiency. The concept is
seamlessly integrated into Mercurial---respecting its distributed storage
concept---to ensure practical usability and compatibility to existing
deployments.}},
  author       = {{Lass, Michael and Leibenger, Dominik and Sorge, Christoph}},
  booktitle    = {{Proc. 41st Conference on Local Computer Networks (LCN)}},
  isbn         = {{978-1-5090-2054-6}},
  keywords     = {{access control, distributed version control systems, mercurial, peer-to-peer, convergent encryption, confidentiality, authenticity}},
  publisher    = {{IEEE}},
  title        = {{{Confidentiality and Authenticity for Distributed Version Control Systems - A Mercurial Extension}}},
  doi          = {{10.1109/lcn.2016.11}},
  year         = {{2016}},
}

@article{190,
  abstract     = {{Today, software components are provided by global markets in the form of services. In order to optimally satisfy service requesters and service providers, adequate techniques for automatic service matching are needed. However, a requester’s requirements may be vague and the information available about a provided service may be incomplete. As a consequence, fuzziness is induced into the matching procedure. The contribution of this paper is the development of a systematic matching procedure that leverages concepts and techniques from fuzzy logic and possibility theory based on our formal distinction between different sources and types of fuzziness in the context of service matching. In contrast to existing methods, our approach is able to deal with imprecision and incompleteness in service specifications and to inform users about the extent of induced fuzziness in order to improve the user’s decision-making. We demonstrate our approach on the example of specifications for service reputation based on ratings given by previous users. Our evaluation based on real service ratings shows the utility and applicability of our approach.}},
  author       = {{Platenius, Marie Christin and Shaker, Ammar and Becker, Matthias and H{\"u}llermeier, Eyke and Sch{\"a}fer, Wilhelm}},
  journal      = {{IEEE Transactions on Software Engineering (TSE)}},
  note         = {{Presented at ICSE 2017}},
  number       = {{8}},
  pages        = {{739--759}},
  publisher    = {{IEEE}},
  title        = {{{Imprecise Matching of Requirements Specifications for Software Services using Fuzzy Logic}}},
  doi          = {{10.1109/TSE.2016.2632115}},
  year         = {{2016}},
}

@inproceedings{2367,
  abstract     = {{One of the most popular fuzzy clustering techniques is the fuzzy K-means algorithm (also known as fuzzy-c-means or FCM algorithm). In contrast to the K-means and K-median problem, the underlying fuzzy K-means problem has not been studied from a theoretical point of view. In particular, there are no algorithms with approximation guarantees similar to the famous K-means++ algorithm known for the fuzzy K-means problem. This work initiates the study of the fuzzy K-means problem from an algorithmic and complexity theoretic perspective. We show that optimal solutions for the fuzzy K-means problem cannot, in general, be expressed by radicals over the input points. Surprisingly, this already holds for simple inputs in one-dimensional space. Hence, one cannot expect to compute optimal solutions exactly. We give the first (1+eps)-approximation algorithms for the fuzzy K-means problem. First, we present a deterministic approximation algorithm whose runtime is polynomial in N and linear in the dimension D of the input set, given that K is constant, i.e. a polynomial time approximation scheme (PTAS) for fixed K. We achieve this result by showing that for each soft clustering there exists a hard clustering with similar properties. Second, by using techniques known from coreset constructions for the K-means problem, we develop a deterministic approximation algorithm that runs in time almost linear in N but exponential in the dimension D. We complement these results with a randomized algorithm which imposes some natural restrictions on the sought solution and whose runtime is comparable to some of the most efficient approximation algorithms for K-means, i.e. linear in the number of points and the dimension, but exponential in the number of clusters.}},
  author       = {{Blömer, Johannes and Brauer, Sascha and Bujna, Kathrin}},
  booktitle    = {{2016 IEEE 16th International Conference on Data Mining (ICDM)}},
  isbn         = {{9781509054732}},
  keywords     = {{unsolvability by radicals, clustering, fuzzy k-means, probabilistic method, approximation algorithms, randomized algorithms}},
  location     = {{Barcelona, Spain}},
  pages        = {{805--810}},
  publisher    = {{IEEE}},
  title        = {{{A Theoretical Analysis of the Fuzzy K-Means Problem}}},
  doi          = {{10.1109/icdm.2016.0094}},
  year         = {{2016}},
}

@inproceedings{20556,
  author       = {{Bodden, Eric and Pun, Ka I and Steffen, Martin and Stolz, Volker and Wickert, Anna-Katharina}},
  booktitle    = {{Leveraging Applications of Formal Methods, Verification and Validation: Foundational Techniques - 7th International Symposium, ISoLA 2016, Imperial, Corfu, Greece, October 10-14, 2016, Proceedings, Part {I}}},
  pages        = {{431--445}},
  title        = {{{Information Flow Analysis for Go}}},
  doi          = {{10.1007/978-3-319-47166-2_30}},
  year         = {{2016}},
}

@inproceedings{207,
  abstract     = {{We consider a scheduling problem where machines need to be rented from the cloud in order to process jobs. There are two types of machines available which can be rented for machine-type dependent prices and for arbitrary durations. However, a machine-type dependent setup time is required before a machine is available for processing. Jobs arrive online over time, have machine-type dependent sizes and have individual deadlines. The objective is to rent machines and schedule jobs so as to meet all deadlines while minimizing the rental cost. Since we observe the slack of jobs to have a fundamental influence on the competitiveness, we study the model when instances are parameterized by their (minimum) slack. An instance is called to have a slack of $\beta$ if, for all jobs, the difference between the job's release time and the latest point in time at which it needs to be started is at least $\beta$.}},
  series       = {{LNCS}},
  author       = {{M{\"a}cker, Alexander and Malatyali, Manuel and Meyer auf der Heide, Friedhelm and Riechers, S{\"o}ren}},
  booktitle    = {{Proceedings of the 10th Annual International Conference on Combinatorial Optimization and Applications (COCOA)}},
  pages        = {{578--592}},
  title        = {{{Cost-efficient Scheduling on Machines from the Cloud}}},
  doi          = {{10.1007/978-3-319-48749-6_42}},
  year         = {{2016}},
}

