@inproceedings{8051,
  author       = {Jung, Helge and Nebe, Karsten and Klompmaker, Florian and Fischer, Holger Gerhard},
  booktitle    = {Mensch \& Computer 2011, 11. fachübergreifende Konferenz für interaktive und kooperative Medien, überMEDIEN - ÜBERmorgen},
  pages        = {305--308},
  publisher    = {Oldenbourg Wissenschaftsverlag},
  title        = {{Authentifizierte Eingaben auf Multitouch-Tischen}},
  year         = {2011},
}

@inproceedings{8052,
  author       = {Luckey, Markus and Nagel, Benjamin and Gerth, Christian and Engels, Gregor},
  booktitle    = {Proceeding of the 6th international symposium on Software engineering for adaptive and self-managing systems},
  pages        = {30--39},
  publisher    = {ACM},
  title        = {{Adapt Cases}: Extending Use Cases for Adaptive Systems},
  year         = {2011},
}

@inproceedings{8053,
  author       = {Luckey, Markus and Gerth, Christian and Soltenborn, Christian and Engels, Gregor},
  booktitle    = {Proceedings of the 8th International Conference on Autonomic Computing (ICAC'11)},
  publisher    = {ACM},
  title        = {{QUAASY} - {QUality} Assurance of Adaptive {SYstems}},
  doi          = {10.1145/1998582.1998617},
  year         = {2011},
}

@inproceedings{8054,
  author       = {Bandener, Nils and Soltenborn, Christian and Engels, Gregor},
  booktitle    = {Proceedings of the 3rd International Conference on Software Language Engineering (SLE 2010)},
  pages        = {357--376},
  publisher    = {Springer},
  title        = {Extending {DMM} Behavior Specifications for Visual Execution and Debugging},
  doi          = {10.1007/978-3-642-19440-5_24},
  volume       = {6563},
  year         = {2011},
}

@techreport{8221,
  author       = {Brüseke, Frank and Christ, Fabian and Sauer, Stefan and Wübbeke, Andreas},
  institution  = {University of Paderborn, Software Quality Lab (s-lab)},
  title        = {{Testen von Software-Produktlinien}},
  year         = {2011},
}

@incollection{6293,
  author       = {Nebe, Karsten and Klompmaker, Florian and Jung, Helge and Fischer, Holger Gerhard},
  booktitle    = {Human-Computer Interaction. Interaction Techniques and Environments.},
  editor       = {Jacko, Julie Anne},
  isbn         = {9783642216046},
  issn         = {0302-9743},
  location     = {Orlando, USA},
  pages        = {100--109},
  publisher    = {Springer Berlin Heidelberg},
  title        = {Exploiting New Interaction Techniques for Disaster Control Management Using {Multitouch}-, {Tangible}- and {Pen-Based}-Interaction},
  doi          = {10.1007/978-3-642-21605-3_11},
  volume       = {6762},
  year         = {2011},
}

@inproceedings{6298,
  author       = {Nebe, Karsten and Fischer, Holger Gerhard and Klompmaker, Florian and Jung, Helge},
  booktitle    = {Mensch \& Computer 2011, 11. fachübergreifende Konferenz für interaktive und kooperative Medien, überMEDIEN - ÜBERmorgen},
  pages        = {263--273},
  publisher    = {Oldenbourg Wissenschaftsverlag},
  title        = {{Multitouch-, Be-Greifbare- und Stiftbasierte-Interaktion in der Einsatzlageplanung}},
  year         = {2011},
}

@inproceedings{6299,
  author       = {Jung, Helge and Nebe, Karsten and Klompmaker, Florian and Fischer, Holger Gerhard},
  booktitle    = {Mensch \& Computer 2011, 11. fachübergreifende Konferenz für interaktive und kooperative Medien, überMEDIEN - ÜBERmorgen},
  pages        = {305--308},
  publisher    = {Oldenbourg Wissenschaftsverlag},
  title        = {{Authentifizierte Eingaben auf Multitouch-Tischen}},
  year         = {2011},
  internal-note = {identical to entry 8051 in this file; deduplicate (keep one key, or use biblatex ids for an alias)},
}

@incollection{6300,
  author       = {Fischer, Holger Gerhard and Nebe, Karsten and Klompmaker, Florian},
  booktitle    = {Human Centered Design},
  editor       = {Kurosu, Masaaki},
  isbn         = {9783642217524},
  issn         = {0302-9743},
  location     = {Orlando, USA},
  pages        = {28--37},
  publisher    = {Springer Berlin Heidelberg},
  title        = {A Holistic Model for Integrating Usability Engineering and Software Engineering Enriched with Marketing Activities},
  doi          = {10.1007/978-3-642-21753-1_4},
  volume       = {6776},
  year         = {2011},
}

@inproceedings{6301,
  author       = {Fischer, Holger Gerhard and Bogner, Christian and Geis, Thomas and Polkehn, Knut and Zimmermann, Dirk},
  booktitle    = {Jahresband Usability Professionals},
  pages        = {72--74},
  publisher    = {German UPA},
  title        = {{Der Qualitätsstandard für Usability Engineering der German UPA – Aktueller Stand der Arbeiten}},
  year         = {2011},
}

@inproceedings{645,
  abstract     = {In the standard consensus problem there are n processes with possibly different input values and the goal is to eventually reach a point at which all processes commit to exactly one of these values. We are studying a slight variant of the consensus problem called the stabilizing consensus problem [2]. In this problem, we do not require that each process commits to a final value at some point, but that eventually they arrive at a common, stable value without necessarily being aware of that. This should work irrespective of the states in which the processes are starting. Our main result is a simple randomized algorithm called median rule that, with high probability, just needs O(log m log log n + log n) time and work per process to arrive at an almost stable consensus for any set of m legal values as long as an adversary can corrupt the states of at most $\sqrt{n}$ processes at any time. Without adversarial involvement, just O(log n) time and work is needed for a stable consensus, with high probability. As a by-product, we obtain a simple distributed algorithm for approximating the median of n numbers in time O(log m log log n + log n) under adversarial presence.},
  author       = {Doerr, Benjamin and Goldberg, Leslie Ann and Minder, Lorenz and Sauerwald, Thomas and Scheideler, Christian},
  booktitle    = {Proceedings of the 23rd ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
  pages        = {149--158},
  title        = {Stabilizing consensus with the power of two choices},
  doi          = {10.1145/1989493.1989516},
  year         = {2011},
}

@article{7353,
  abstract     = {Manuelle Testerstellung verursacht hohe Kosten. Im Vergleich dazu bietet modellbasiertes Testen große Vorteile hinsichtlich Testautomatisierung, früher Fehlerfindung, Erhöhung der Testabdeckung, effizienten Testentwurfs und besserer Rückverfolgbarkeit. Die Einführung des modellbasierten Testens ist jedoch mit Investitionen verbunden, für die die Rendite häufig unklar erscheint. Dabei finden sich in der Literatur bereits etliche Erfahrungsberichte zur erfolg­reichen Einführung von modellbasiertem Testen in unterschiedlichen Anwendungsdomänen. In diesem Artikel präsentieren wir einen Überblick über einige dieser Erfahrungsberichte.},
  author       = {Weißleder, Stephan and Güldali, Baris and Mlynarski, Michael and Törsel, Arne-Michael and Faragó, David and Prester, Florian and Winter, Mario},
  journal      = {OBJEKTspektrum},
  number       = {6},
  pages        = {59--65},
  title        = {{Modellbasiertes Testen: Hype oder Realität?}},
  year         = {2011},
}

@article{7354,
  abstract     = {Acceptance testing is a time-consuming task for complex software systems that have to fulfill a large number of requirements. To reduce this effort, we have developed a widely automated method for deriving test plans from requirements that are expressed in natural language. It consists of three stages: annotation, clustering, and test plan specification. The general idea is to exploit redundancies and implicit relationships in requirements specifications. Multi-viewpoint techniques based on RM-ODP (Reference Model for Open Distributed Processing) are employed for specifying the requirements. We then use linguistic analysis techniques, requirements clustering algorithms, and pattern-based requirements collection to reduce the total effort of testing against the requirements specification. In particular, we use linguistic analysis for extracting and annotating the actor, process and object of a requirements statement. During clustering, a similarity function is computed as a measure for the overlap of requirements. In the test plan specification stage, our approach provides capabilities for semi-automatically deriving test plans and acceptance criteria from the clustered informal textual requirements. Two patterns are applied to compute a suitable order of test activities. The generated test plans consist of a sequence of test steps and asserts that are executed or checked in the given order. We also present the supporting prototype tool TORC, which is available open source. For the evaluation of the approach, we have conducted a case study in the field of acceptance testing of a national electronic identification system. In summary, we report on lessons learned how linguistic analysis and clustering techniques can help testers in understanding the relations between requirements and for improving test planning.},
  author       = {Güldali, Baris and Funke, Holger and Sauer, Stefan and Engels, Gregor},
  issn         = {0963-9314},
  journal      = {Software Quality Journal},
  number       = {4},
  pages        = {771--799},
  publisher    = {Springer Nature},
  title        = {{TORC}: test plan optimization by requirements clustering},
  doi          = {10.1007/s11219-011-9149-4},
  volume       = {19},
  year         = {2011},
}

@article{7355,
  abstract     = {Dynamic Meta Modeling (DMM) is a visual semantics specification technique targeted at languages based on a metamodel. A DMM specification consists of a runtime metamodel and operational rules which describe how instances of the runtime metamodel change over time. A known deficiency of the DMM approach is that it does not support the refinement of a DMM specification, e.g., in the case of defining the semantics for a refined and extended domain-specific language (DSL). Up to now, DMM specifications could only be reused by adding or removing DMM rules. In this paper, we enhance DMM such that DMM rules can override other DMM rules, similar to a method being overridden in a subclass, and we show how rule overriding can be realized with the graph transformation tool GROOVE. We argue that rule overriding does not only have positive impact on reusability, but also improves the intuitive understandability of DMM semantics specifications.},
  author       = {Soltenborn, Christian and Engels, Gregor},
  issn         = {1045-926X},
  journal      = {Journal of Visual Languages \& Computing},
  number       = {3},
  pages        = {233--250},
  publisher    = {Elsevier BV},
  title        = {Using rule overriding to improve reusability and understandability of {Dynamic Meta Modeling} specifications},
  doi          = {10.1016/j.jvlc.2010.12.005},
  volume       = {22},
  year         = {2011},
}

@inproceedings{7535,
  author       = {Böckelmann, Irina and Schenk, Daniel and Rößler, Thoralf and Adler, Simon and Senft, Björn and Grubert, Jens and Mecke, Rüdiger and Huckauf, Anke and Urbina, Mario and Tümler, Johannes and Darius, Sabine},
  title        = {{Physiologische Beanspruchungsreaktionen bei der Anwendung von kopfgetragenen AR-Displays}},
  year         = {2011},
  internal-note = {booktitle missing: required for @inproceedings; venue not determinable from this file, TODO look up and add},
}

@inproceedings{7536,
  author       = {Klompmaker, Florian and Senft, Björn and Nebe, Karsten and Busch, Clemens and Willemsen, Detlev},
  booktitle    = {{HEALTHINF} 2011 - Proceedings of the International Conference on Health Informatics, Rome, Italy, 26-29 January, 2011},
  pages        = {268--273},
  title        = {User Centered Design Process of {OSAMi-D} - Developing User Interfaces for a Remote Ergometer Training Application},
  year         = {2011},
}

@inproceedings{8471,
  abstract     = {Performance is an important quality attribute for business information systems. When a tester has spotted a performance error, the error is passed to the software developers to fix it. However, in component-based software development the tester has to do blame analysis first, i. e. the tester has to decide, which party is responsible to fix the error. If the error is a design or deployment issue, it can be assigned to the software architect or the system deployer. If the error is specific to a component, it needs to be assigned to the corresponding component developer. An accurate blame analysis is important, because wrong assignments of errors will cause a loss of time and money. Our approach aims at doing blame analysis for performance errors by comparing performance metrics obtained in performance testing and performance prediction. We use performance prediction values as expected values for individual components. For performance prediction we use the Palladio approach. By this means, our approach evaluates each component’s performance in a certain test case. If the component performs poorly, its component developer needs to fix the component or the architect replaces the component with a faster one. If no component performs poorly, we can deduce that there is a design or deployment issue and the architecture needs to be changed. In this paper, we present an exemplary blame analysis based on a web shop system. The example shows the feasibility of our approach.},
  author       = {Brüseke, Frank and Becker, Steffen and Engels, Gregor},
  booktitle    = {Proceedings of the 16th International Workshop on Component-Oriented Programming (WCOP; satellite event of the CompArch 2011), Boulder Colorado, CO (USA)},
  pages        = {25--32},
  publisher    = {ACM},
  title        = {{Palladio}-based performance blame analysis},
  doi          = {10.1145/2000292.2000298},
  year         = {2011},
}

@inproceedings{8472,
  author       = {Nagel, Benjamin},
  booktitle    = {Proceedings of the Software Engineering 2011 (SE 2011), Karlsruhe (Germany)},
  publisher    = {Gesellschaft für Informatik (GI)},
  title        = {{Semi-automatische Ableitung externer Anpassungsmechanismen für selbst-adaptive Systeme}},
  year         = {2011},
}

@inproceedings{646,
  abstract     = {This paper presents a dynamic overlay network based on the De Bruijn graph which we call Linearized De Bruijn (LDB) network. The LDB network has the advantage that it has a guaranteed constant node degree and that the routing between any two nodes takes at most O(log n) hops with high probability. Also, we show that there is a simple local-control algorithm that can recover the LDB network from any network topology that is weakly connected.},
  author       = {Richa, Andrea W. and Scheideler, Christian},
  booktitle    = {Proceedings of the 13th International Symposium on Stabilization, Safety, and Security of Distributed Systems (SSS)},
  pages        = {416--430},
  title        = {Self-Stabilizing {DeBruijn} Networks},
  doi          = {10.1007/978-3-642-24550-3_31},
  year         = {2011},
}

@article{647,
  author       = {Leuschel, Michael and Wehrheim, Heike},
  journal      = {Science of Computer Programming},
  number       = {10},
  pages        = {835--836},
  publisher    = {Elsevier},
  title        = {Selected papers on Integrated Formal Methods ({iFM09})},
  doi          = {10.1016/j.scico.2011.01.009},
  year         = {2011},
}

