@inproceedings{24,
  author       = {Kenter, Tobias and Plessl, Christian},
  booktitle    = {Proc. Workshop on Heterogeneous High-performance Reconfigurable Computing ({H2RC})},
  title        = {Microdisk Cavity {FDTD} Simulation on {FPGA} using {OpenCL}},
  year         = {2016},
}

@inproceedings{138,
  abstract     = {Hardware accelerators are becoming popular in academia and industry. To move one step further from the state-of-the-art multicore plus accelerator approaches, we present in this paper our innovative SAVEHSA architecture. It comprises of a heterogeneous hardware platform with three different high-end accelerators attached over PCIe (GPGPU, FPGA and Intel MIC). Such systems can process parallel workloads very efficiently whilst being more energy efficient than regular CPU systems. To leverage the heterogeneity, the workload has to be distributed among the computing units in a way that each unit is well-suited for the assigned task and executable code must be available. To tackle this problem we present two software components; the first can perform resource allocation at runtime while respecting system and application goals (in terms of throughput, energy, latency, etc.) and the second is able to analyze an application and generate executable code for an accelerator at runtime. We demonstrate the first proof-of-concept implementation of our framework on the heterogeneous platform, discuss different runtime policies and measure the introduced overheads.},
  author       = {Riebler, Heinrich and Vaz, Gavin Francis and Plessl, Christian and Trainiti, Ettore M. G. and Durelli, Gianluca C. and Del Sozzo, Emanuele and Santambrogio, Marco D. and Bolchini, Christina},
  booktitle    = {Proceedings of International Forum on Research and Technologies for Society and Industry ({RTSI})},
  pages        = {1--5},
  publisher    = {IEEE},
  title        = {Using Just-in-Time Code Generation for Transparent Resource Management in Heterogeneous Systems},
  doi          = {10.1109/RTSI.2016.7740545},
  year         = {2016},
}

@incollection{156,
  abstract     = {Many modern compute nodes are heterogeneous multi-cores that integrate several CPU cores with fixed function or reconfigurable hardware cores. Such systems need to adapt task scheduling and mapping to optimise for performance and energy under varying workloads and, increasingly important, for thermal and fault management and are thus relevant targets for self-aware computing. In this chapter, we take up the generic reference architecture for designing self-aware and self-expressive computing systems and refine it for heterogeneous multi-cores. We present ReconOS, an architecture, programming model and execution environment for heterogeneous multi-cores, and show how the components of the reference architecture can be implemented on top of ReconOS. In particular, the unique feature of dynamic partial reconfiguration supports self-expression through starting and terminating reconfigurable hardware cores. We detail a case study that runs two applications on an architecture with one CPU and 12 reconfigurable hardware cores and present self-expression strategies for adapting under performance, temperature and even conflicting constraints. The case study demonstrates that the reference architecture as a model for self-aware computing is highly useful as it allows us to structure and simplify the design process, which will be essential for designing complex future compute nodes. Furthermore, ReconOS is used as a base technology for flexible protocol stacks in Chapter 10, an approach for self-aware computing at the networking level.},
  author       = {Agne, Andreas and Happe, Markus and Lösch, Achim and Plessl, Christian and Platzner, Marco},
  booktitle    = {Self-aware Computing Systems},
  pages        = {145--165},
  publisher    = {Springer International Publishing},
  title        = {Self-aware Compute Nodes},
  doi          = {10.1007/978-3-319-39675-0_8},
  year         = {2016},
}

@article{165,
  abstract     = {A broad spectrum of applications can be accelerated by offloading computation intensive parts to reconfigurable hardware. However, to achieve speedups, the number of loop iterations (trip count) needs to be sufficiently large to amortize offloading overheads. Trip counts are frequently not known at compile time, but only at runtime just before entering a loop. Therefore, we propose to generate code for both the CPU and the coprocessor, and defer the offloading decision to the application runtime. We demonstrate how a toolflow, based on the LLVM compiler framework, can automatically embed dynamic offloading decisions into the application code. We perform in-depth static and dynamic analysis of popular benchmarks, which confirm the general potential of such an approach. We also propose to optimize the offloading process by decoupling the runtime decision from the loop execution (decision slack). The feasibility of our approach is demonstrated by a toolflow that automatically identifies suitable data-parallel loops and generates code for the FPGA coprocessor of a Convey HC-1. We evaluate the integrated toolflow with representative loops executed for different input data sizes.},
  author       = {Vaz, Gavin Francis and Riebler, Heinrich and Kenter, Tobias and Plessl, Christian},
  issn         = {0045-7906},
  journal      = {Computers and Electrical Engineering},
  pages        = {91--111},
  publisher    = {Elsevier},
  title        = {Potential and Methods for Embedding Dynamic Offloading Decisions into Application Code},
  doi          = {10.1016/j.compeleceng.2016.04.021},
  volume       = {55},
  year         = {2016},
}

@inproceedings{168,
  abstract     = {The use of heterogeneous computing resources, such as Graphic Processing Units or other specialized coprocessors, has become widespread in recent years because of their performance and energy efficiency advantages. Approaches for managing and scheduling tasks to heterogeneous resources are still subject to research. Although queuing systems have recently been extended to support accelerator resources, a general solution that manages heterogeneous resources at the operating system level to exploit a global view of the system state is still missing. In this paper we present a user space scheduler that enables task scheduling and migration on heterogeneous processing resources in Linux. Using run queues for available resources we perform scheduling decisions based on the system state and on task characterization from earlier measurements. With a programming pattern that supports the integration of checkpoints into applications, we preempt tasks and migrate them between three very different compute resources. Considering static and dynamic workload scenarios, we show that this approach can gain up to 17% performance, on average 7%, by effectively avoiding idle resources. We demonstrate that a work-conserving strategy without migration is no suitable alternative.},
  author       = {Lösch, Achim and Beisel, Tobias and Kenter, Tobias and Plessl, Christian and Platzner, Marco},
  booktitle    = {Proceedings of the 2016 Design, Automation \& Test in Europe Conference \& Exhibition ({DATE})},
  pages        = {912--917},
  publisher    = {EDA Consortium / IEEE},
  title        = {Performance-centric scheduling with task migration for a heterogeneous compute node in the data center},
  year         = {2016},
}

@inproceedings{171,
  author       = {Kenter, Tobias and Vaz, Gavin Francis and Riebler, Heinrich and Plessl, Christian},
  booktitle    = {Workshop on Reconfigurable Computing ({WRC})},
  title        = {Opportunities for deferring application partitioning and accelerator synthesis to runtime (extended abstract)},
  year         = {2016},
}

@inproceedings{248,
  author       = {John, Thomas},
  booktitle    = {{AIS} {SIGPRAG} Pre-{ICIS} Workshop: Practice-based Design and Innovation of Digital Artifacts},
  location     = {Fort Worth, USA},
  title        = {Supporting Business Model Idea Generation Through Machine-generated Ideas - Towards a Design Theory},
  year         = {2015},
}

@techreport{249,
  abstract     = {We analyze the stability of networks when two intermediaries strategically form costly links to customers. We interpret these links as customer relationships that enable trade to sell a product. Equilibrium prices and equilibrium quantities on the output as well as on the input market are determined endogenously for a given network of customer relationships. We investigate in how far the substitutability of the intermediaries' products and the costs of link formation influence the intermediaries' equilibrium profits and thus have an impact on the incentives to strategically form relationships to customers. For networks with three customers we characterize locally stable networks, in particular existence is guaranteed for any degree of substitutability. Moreover for the special cases of perfect complements, independent products and perfect substitutes, local stability coincides with the stronger concept of Nash stability. Additionally, for networks with n customers we analyze stability regions for selected networks and determine their limits when n goes to infinity. It turns out that the shape of the stability regions for those networks does not significantly change compared to a setting with a small number of customers.},
  author       = {Brangewitz, Sonja and Haake, Claus-Jochen and Möhlmeier, Philipp},
  institution  = {Universität Paderborn},
  number       = {91},
  title        = {Strategic Formation of Customer Relationship Networks},
  year         = {2015},
}

@inproceedings{250,
  abstract     = {Before execution, users should formally validate the correctness of software received from untrusted providers. To accelerate this validation, in the proof carrying code (PCC) paradigm the provider delivers the software together with a certificate, a formal proof of the software's correctness. Thus, the user only checks if the attached certificate shows correctness of the delivered software. Recently, we introduced configurable program certification, a generic, PCC based framework supporting various software analyses and safety properties. Evaluation of our framework revealed that validation suffers from certificate reading. In this paper, we present two orthogonal approaches which improve certificate validation, both reducing the impact of certificate reading. The first approach reduces the certificate size, storing information only if it cannot easily be recomputed. The second approach partitions the certificate into independently checkable parts. The trick is to read parts of the certificate while already checking read parts. Our experiments show that validation highly benefits from our improvements.},
  author       = {Jakobs, Marie-Christine},
  booktitle    = {Proceedings of the 13th International Conference on Software Engineering and Formal Methods ({SEFM})},
  pages        = {159--174},
  title        = {Speed Up Configurable Certificate Validation by Certificate Reduction and Partitioning},
  doi          = {10.1007/978-3-319-22969-0_12},
  year         = {2015},
}

@misc{251,
  author       = {Pfannschmidt, Karlson},
  publisher    = {Universität Paderborn},
  title        = {Solving the aggregated bandits problem},
  year         = {2015},
}

@inproceedings{253,
  abstract     = {Group signatures, introduced by Chaum and van Heyst [15], are an important primitive in cryptography. In group signature schemes every group member can anonymously sign messages on behalf of the group. In case of disputes a dedicated opening manager is able to trace signatures - he can extract the identity of the producer of a given signature. A formal model for static group signatures schemes and their security is defined by Bellare, Micciancio, and Warinschi [4], the case of dynamic groups is considered by Bellare, Shi, and Zhang [5]. Both models define group signature schemes with a single opening manager. The main difference between these models is that the number of group members in static schemes is fixed, while in dynamic schemes group members can join the group over time.},
  author       = {Blömer, Johannes and Juhnke, Jakob and Löken, Nils},
  booktitle    = {Proceedings of the Sixth International Conference on Mathematical Aspects of Computer and Information Sciences ({MACIS})},
  pages        = {166--180},
  title        = {Short Group Signatures with Distributed Traceability},
  doi          = {10.1007/978-3-319-32859-1_14},
  year         = {2015},
}

@inproceedings{254,
  abstract     = {We empirically investigate how hosts on Airbnb, a popular peer-to-peer website for fee-based sharing of under-utilized space, adjust their prices once their offering gets a visible star rating for the first time. We use data for over 14,000 offerings from Airbnb which we collected for New York City. Our findings indicate that hosts whose offerings achieve star rating visibility significantly increase their prices by an average of 2.69 € more than hosts with comparable offerings who do not experience this rating visibility during the time of observation. Out of all offerings who achieve rating visibility, we identify the upper quartile of hosts to be the main driver of this price increase, whereas the first 75% percent show only a marginal price reaction. These results can serve as a first step towards understanding the motivation of people to provide assets to the sharing economy.},
  author       = {Gutt, Dominik and Herrmann, Philipp},
  booktitle    = {Proceedings of the 23rd European Conference on Information Systems ({ECIS})},
  location     = {Münster, Germany},
  title        = {Sharing Means Caring? Hosts' Price Reactions to Rating Visibility},
  year         = {2015},
}

@misc{255,
  author       = {Trykacz, Matthias},
  publisher    = {Universität Paderborn},
  title        = {{Share Economy - Identifikation von konstituierenden Merkmalen anhand einer vergleichenden Betrachtung von Geschäftsmodellen}},
  year         = {2015},
}

@misc{256,
  author       = {Zindler, Finn},
  publisher    = {Universität Paderborn},
  title        = {{Selektive Vertriebssysteme am Fallbeispiel der Adidas AG - eine wettbewerbspolitische Beurteilung}},
  year         = {2015},
}

@inproceedings{280,
  abstract     = {The Collaborative Research Centre "On-The-Fly Computing" works on foundations and principles for the vision of the Future Internet. It proposes the paradigm of On-The-Fly Computing, which tackles emerging worldwide service markets. In these markets, service providers trade software, platform, and infrastructure as a service. Service requesters state requirements on services. To satisfy these requirements, the new role of brokers, who are (human) actors building service compositions on the fly, is introduced. Brokers have to specify service compositions formally and comprehensively using a domain-specific language (DSL), and to use service matching for the discovery of the constituent services available in the market. The broker's choice of the DSL and matching approaches influences her success of building compositions as distinctive properties of different service markets play a significant role. In this paper, we propose a new approach of engineering a situation-specific DSL by customizing a comprehensive, modular DSL and its matching for given service market properties. This enables the broker to create market-specific composition specifications and to perform market-specific service matching. As a result, the broker builds service compositions satisfying the requester's requirements more accurately. We evaluated the presented concepts using case studies in service markets for tourism and university management.},
  author       = {Arifulina, Svetlana and Platenius, Marie Christin and Mohr, Felix and Engels, Gregor and Schäfer, Wilhelm},
  booktitle    = {Proceedings of the {IEEE} 11th World Congress on Services ({SERVICES}), Visionary Track: Service Composition for the Future Internet},
  pages        = {333--340},
  title        = {Market-Specific Service Compositions: Specification and Matching},
  doi          = {10.1109/SERVICES.2015.58},
  year         = {2015},
}

@misc{281,
  author       = {Rojahn, Tobias},
  publisher    = {Universität Paderborn},
  title        = {Load Balancing for Range Queries in a Dimension Invariant Peer-to-Peer Network},
  year         = {2015},
}

@misc{282,
  author       = {Kirsch, Michelle},
  publisher    = {Universität Paderborn},
  title        = {{Koordinierter Patentschutz in einer globalisierten Welt - Effizienz- und Anreizwirkungen auf die Arzneimittelversorgung in Entwicklungsländern}},
  year         = {2015},
}

@inproceedings{283,
  abstract     = {Today, software verification is an established analysis method which can provide high guarantees for software safety. However, the resources (time and/or memory) for an exhaustive verification are not always available, and analysis then has to resort to other techniques, like testing. Most often, the already achieved partial verification results are discarded in this case, and testing has to start from scratch. In this paper, we propose a method for combining verification and testing in which testing only needs to check the residual fraction of an uncompleted verification. To this end, the partial results of a verification run are used to construct a residual program (and residual assertions to be checked on it). The residual program can afterwards be fed into standard testing tools. The proposed technique is sound modulo the soundness of the testing procedure. Experimental results show that this combined usage of verification and testing can significantly reduce the effort for the subsequent testing.},
  author       = {Czech, Mike and Jakobs, Marie-Christine and Wehrheim, Heike},
  booktitle    = {Fundamental Approaches to Software Engineering},
  editor       = {Egyed, Alexander and Schaefer, Ina},
  pages        = {100--114},
  title        = {Just test what you cannot verify!},
  doi          = {10.1007/978-3-662-46675-9_7},
  year         = {2015},
}

@article{284,
  abstract     = {In this work, we present the first scalable distributed information system, that is, a system with low storage overhead, that is provably robust against denial-of-service (DoS) attacks by a current insider. We allow a current insider to have complete knowledge about the information system and to have the power to block any ϵ-fraction of its servers by a DoS attack, where ϵ can be chosen up to a constant. The task of the system is to serve any collection of lookup requests with at most one per nonblocked server in an efficient way despite this attack. Previously, scalable solutions were only known for DoS attacks of past insiders, where a past insider only has complete knowledge about some past time point t0 of the information system. Scheideler et al. [Awerbuch and Scheideler 2007; Baumgart et al. 2009] showed that in this case, it is possible to design an information system so that any information that was inserted or last updated after t0 is safe against a DoS attack. But their constructions would not work at all for a current insider. The key idea behind our IRIS system is to make extensive use of coding. More precisely, we present two alternative distributed coding strategies with an at most logarithmic storage overhead that can handle up to a constant fraction of blocked servers.},
  author       = {Eikel, Martina and Scheideler, Christian},
  journal      = {Transactions on Parallel Computing},
  number       = {3},
  pages        = {18:1--18:33},
  publisher    = {ACM},
  title        = {{IRIS}: A Robust Information System Against Insider {DoS} Attacks},
  doi          = {10.1145/2809806},
  year         = {2015},
}

@inproceedings{285,
  abstract     = {We propose an incremental workflow for the verification of parameterized systems modeled as symmetric networks of timed automata. Starting with a small number of timed automata in the network, a safety property is verified using IC3, a state-of-the-art algorithm based on induction. The result of the verification, an inductive strengthening, is reused proposing a candidate inductive strengthening for a larger network. If the candidate is valid, our main theorem states that the safety property holds for all sizes of the network of timed automata. Otherwise the number of automata is increased and the next iteration is started with a new run of IC3. We propose and thoroughly examine optimizations to our workflow, e.g., feedback mechanisms to speed up the run of IC3.},
  author       = {Isenberg, Tobias},
  booktitle    = {Proceedings of the 15th International Conference on Application of Concurrency to System Design ({ACSD})},
  pages        = {1--9},
  title        = {Incremental Inductive Verification of Parameterized Timed Systems},
  doi          = {10.1109/ACSD.2015.13},
  year         = {2015},
}

