@inproceedings{4698,
  author       = {Gregor, Shirley and Müller, Oliver and Seidel, Stefan},
  booktitle    = {European Conference on Information Systems},
  keywords     = {Abstraction, Affordances, Design Science Research, Design Theory, Information Systems Development, Reflection, Theorizing},
  title        = {Reflection, abstraction and theorizing in design and development research},
  year         = {2013},
}

@article{4699,
  author       = {Becker, Jörg and Beverungen, Daniel and Knackstedt, Ralf and Matzner, Martin and Müller, Oliver and Pöppelbuss, Jens},
  issn         = {0905-0167},
  journal      = {Scandinavian Journal of Information Systems},
  keywords     = {Business process management, Conceptual modeling, Interaction routines, Modular design, Service networks, Social construction},
  number       = {1},
  pages        = {17--47},
  title        = {Designing interaction routines in service networks: A modularity and social construction-based approach},
  year         = {2013},
}

@inproceedings{10620,
  author       = {Anwer, Jahanzeb and Meisner, Sebastian and Platzner, Marco},
  booktitle    = {2013 International Conference on Reconfigurable Computing and {FPGAs} ({ReConFig})},
  keywords     = {fault tolerant computing, field programmable gate arrays, logic design, reliability, BYU-LANL tool, DRM tool flow, FPGA based hardware designs, avionic application, device technologies, dynamic reliability management, fault-tolerant operation, hardware designs, reconfiguring reliability levels, space applications, Field programmable gate arrays, Hardware, Redundancy, Reliability engineering, Runtime, Tunneling magnetoresistance},
  pages        = {1--6},
  title        = {Dynamic reliability management: Reconfiguring reliability-levels of hardware designs at runtime},
  doi          = {10.1109/ReConFig.2013.6732280},
  year         = {2013},
}

@article{5717,
  abstract     = {Although professional service providers increasingly deliver their services globally, little is known about cross-cultural differences in customers’ motivation to participate in service production. To address this lacuna, we survey a total of 2,284 banking customers in 11 countries on their motivation to provide personal information to, and follow the advice of, their service providers. We find differences in both aspects, but only the differences in providing personal information can be explained by the cultural values of uncertainty avoidance, individualism/collectivism, and masculinity/femininity. To perform certain tasks in the service process, global professional service providers should acknowledge cultural differences in customers’ motivations.},
  author       = {Schumann, Jan H. and Wünderlich, Nancy and Zimmer, Marcus S.},
  journal      = {Schmalenbach Business Review},
  keywords     = {Co-Production, Culture, Customer Participation, Professional Services},
  number       = {2},
  pages        = {141--165},
  publisher    = {Springer},
  title        = {Culture’s Impact on Customer Motivation to Engage in Professional Service Enactments},
  volume       = {64},
  year         = {2012},
}

@inproceedings{9791,
  abstract     = {The rapid development of communication and information technology opens up fascinating perspectives, which go far beyond the state of the art in mechatronics: mechatronic systems with inherent partial intelligence. These so called self-optimizing systems adapt their objectives and behavior autonomously and flexibly to changing operating conditions. On the one hand, securing the dependability of such systems is challenging due to their complexity and non-deterministic behavior. On the other hand, self-optimization can be used to increase the dependability of the system during its operation. However, it has to be ensured, that the self-optimization works dependable itself. To cope with these challenges, the multi-level dependability concept was developed. It enables predictive condition monitoring, influences the objectives of the system and determines suitable means to improve the system's dependability during its operation. In this contribution we introduce a procedure for the conceptual design of an advanced condition monitoring based on the system's principle solution. The principle solution describes the principal operation mode of the system and its desired behavior. It is modeled using the specification technique for the domain-spanning description of the principle solution of a self-optimizing system and consists of a coherent system of eight partial models (e.g. requirements, active structure, system of objectives, behavior, etc.). The partial models are analyzed separately in order to derive the components of the multi-level dependability concept. In particular, the reliability analysis of the partial model active structure is performed to identify the system elements to be monitored and parameters to be measured. The principle solution is extended accordingly: e.g. with system elements required for the realization of the dependability concept. The advantages of the method are shown on the self-optimizing guidance module of a railroad vehicle.},
  author       = {Sondermann-Wölke, Christoph and Meyer, Tobias and Dorociak, Rafal and Gausemeier, Jürgen and Sextro, Walter},
  booktitle    = {Proceedings of the 11th International Probabilistic Safety Assessment and Management Conference (PSAM11) and The Annual European Safety and Reliability Conference (ESREL2012)},
  keywords     = {Mechatronic Systems, Principle Solution, Condition Monitoring, Conceptual Design},
  title        = {Conceptual Design of Advanced Condition Monitoring for a Self-Optimizing System based on its Principle Solution},
  year         = {2012},
}

@inproceedings{36994,
  abstract     = {This paper proposes a quality driven, simulation based approach to functional design verification, which applies mainly to IP-level HDL designs with well specified test instruction format and is evaluated on a soft microprocessor core MB-LITE [5]. The approach utilizes mutation analysis as the quality metric to steer an automated simulation data generation process. It leads to a simulation flow with two phases towards an enhanced mutation analysis result. First in a random simulation phase, an in-loop heuristics is deployed and adjusts dynamically the test probability distribution so as to improve the coverage efficiency. Next, for each remaining hard-to-kill mutant, a search heuristics on test input space is developed to iteratively locate a target test, using a specific objective cost function for the goal of killing HDL mutant. The effectiveness of this integrated two-phase simulation flow is demonstrated by the results with the MB-LITE microprocessor IP.},
  author       = {Xie, Tao and Müller, Wolfgang and Letombe, Florian},
  booktitle    = {Proceedings of SOCC2012},
  keywords     = {Analytical models, Hardware design languages, Microprocessors, Cost function, Data models, Search problems, IP networks},
  publisher    = {IEEE},
  title        = {Mutation-Analysis Driven Functional Verification of a Soft Microprocessor},
  doi          = {10.1109/SOCC.2012.6398362},
  year         = {2012},
}

@article{4708,
  author       = {Müller-Wienbergen, Felix and Müller, Oliver and Seidel, Stefan and Becker, Jörg},
  issn         = {1536-9323},
  journal      = {Journal of the Association for Information Systems},
  keywords     = {Creativity, Creativity Support Systems, convergent thinking, design theory, divergent thinking},
  number       = {11},
  pages        = {714--740},
  title        = {Leaving the Beaten Tracks in Creative Work – A Design Theory for Systems that Support Convergent and Divergent Thinking},
  internal-note = {review: removed isbn field (it duplicated the journal ISSN) and doi 10.1016/S0006-3495(00)76637-9, which resolves to an unrelated Biophysical Journal (2000) article -- locate the correct JAIS DOI},
  year         = {2011},
}

@article{11850,
  abstract     = {In this paper, we present a novel blocking matrix and fixed beamformer design for a generalized sidelobe canceler for speech enhancement in a reverberant enclosure. They are based on a new method for estimating the acoustical transfer function ratios in the presence of stationary noise. The estimation method relies on solving a generalized eigenvalue problem in each frequency bin. An adaptive eigenvector tracking utilizing the power iteration method is employed and shown to achieve a high convergence speed. Simulation results demonstrate that the proposed beamformer leads to better noise and interference reduction and reduced speech distortions compared to other blocking matrix designs from the literature.},
  author       = {Krueger, Alexander and Warsitz, Ernst and Haeb-Umbach, Reinhold},
  journal      = {IEEE Transactions on Audio, Speech, and Language Processing},
  keywords     = {acoustical transfer function ratio, adaptive eigenvector tracking, array signal processing, beamformer design, blocking matrix, eigenvalues and eigenfunctions, eigenvector-based transfer function ratios estimation, generalized sidelobe canceler, interference reduction, iterative methods, power iteration method, reduced speech distortions, reverberant enclosure, reverberation, speech enhancement, stationary noise},
  number       = {1},
  pages        = {206--219},
  title        = {Speech Enhancement With a {GSC-Like} Structure Employing Eigenvector-Based Transfer Function Ratios Estimation},
  doi          = {10.1109/TASL.2010.2047324},
  volume       = {19},
  year         = {2011},
}

@inproceedings{2200,
  author       = {Kenter, Tobias and Platzner, Marco and Plessl, Christian and Kauschke, Michael},
  booktitle    = {Proc. Int. Symp. on Field-Programmable Gate Arrays ({FPGA})},
  isbn         = {978-1-4503-0554-9},
  keywords     = {design space exploration, LLVM, partitioning, performance, estimation, funding-intel},
  pages        = {177--180},
  publisher    = {ACM},
  title        = {Performance Estimation Framework for Automated Exploration of {CPU-Accelerator} Architectures},
  doi          = {10.1145/1950413.1950448},
  year         = {2011},
}

@inproceedings{37002,
  abstract     = {HDL-mutation based fault injection and analysis is considered as an important coverage metric for measuring the quality of design simulation processes [20, 3, 1, 2]. In this work, we try to solve the problem of automatic simulation data generation targeting HDL mutation faults. We follow a search based approach and eliminate the need for symbolic execution and mathematical constraint solving from existing work. An objective cost function is defined on the test input space and serves the guidance of search for fault-detecting test data. This is done by first mapping the simulation traces under a test onto a control and data flow graph structure which is extracted from the design. Then the progress of fault detection can be measured quantitatively on this graph to be the cost value. By minimizing this cost we approach the target test data. The effectiveness of the cost function is investigated under an example neighborhood search scheme. Case study with a floating point arithmetic IP design has shown that the cost function is able to guide effectively the search procedure towards a fault-detecting test. The cost calculation time as the search overhead was also observed to be minor compared to the actual design simulation time.},
  author       = {Xie, Tao and Müller, Wolfgang and Letombe, Florian},
  booktitle    = {Proceedings of Euromicro DSD 2011},
  isbn         = {978-1-4577-1048-3},
  keywords     = {Hardware design languages, Cost function, Computational modeling, Fault detection, Data models, Analytical models, Testing},
  publisher    = {IEEE},
  title        = {{HDL-Mutation} Based Simulation Data Generation by Propagation Guided Search},
  doi          = {10.1109/DSD.2011.83},
  year         = {2011},
}

@inproceedings{5690,
  abstract     = {In a world, where more and more businesses seem to trade in an online market, the supply of online services to supply the ever-growing demand could quickly reach its capacity limits. Online service providers may find themselves maxed out at peak operation levels during high-traffic timeslots but too little demand during low-traffic timeslots, although the latter is becoming less frequent. At this point not only deciding which user is allocated what level of service becomes essential, but also the magnitude of the service provided, can be controlled by pricing. Pricing is an important factor when efficient and acceptable allocation of resources between individuals must be reached. Without prices, transferring or sharing goods would be impossible. In sharing information, pricing a product however is not as simple as relatively pricing an apple or a pear. Often the costs, and hence the prices are simply unknown. Backed by this scenario, the online services market could be combined with the market design mechanism of diamonds. For this we propose an ultimatum pricing strategy which effectively allows for valuations to be accounted for, but no longer a necessity when pricing in grid, cloud or other online computing environments.},
  author       = {Bodenstein, Christian and Schryen, Guido and Neumann, Dirk},
  booktitle    = {18th European Conference on Information Systems (ECIS 2010)},
  keywords     = {Posted Price, Ultimatum Game, Energy Efficiency, Mechanism Design},
  title        = {From ``Take-it-or-leave-it'' offers to ``Take-it-or-be-left-out'' Ultimatum -- A trade mechanism for Online Services},
  year         = {2010},
}

@inproceedings{9760,
  abstract     = {Self-optimizing systems are able to adapt their behavior autonomously according to their current self-determined objectives. Unforeseen influences could lead to dependability-critical behavior of the system. Methods are required which secure self-optimizing systems during operation. These methods to increase the dependability of the system should already be taken into consideration in the design process. This paper presents a guideline for the dependability-oriented design of self-optimizing systems, which integrates established classical methods like failure mode and effects analysis as well as methods based on self-optimization. On the one hand self-optimization is used to increase the dependability of the system by integrating objectives like safety, availability, and reliability to the objectives of the system. On the other hand methods are required to ensure the self-optimization itself. As basis for this guideline serves the principle solution of the system. The six phases of the guideline extend the design process and lead to an enhanced principle solution. Additionally, the guideline illustrates phases to implement and validate the self-optimizing system. The proposed guideline is applied to an innovative rail-bound vehicle, called RailCab, which is equipped with self-optimizing function modules.},
  author       = {Sondermann-Wölke, Christoph and Hemsel, Tobias and Sextro, Walter and Gausemeier, Jürgen and Pook, Sebastian},
  booktitle    = {Industrial Informatics (INDIN), 2010 8th IEEE International Conference on},
  keywords     = {RailCab, dependability-critical behavior, dependability-oriented design, failure mode, rail-bound vehicle, secure self-optimizing systems, self-optimizing function modules, optimisation, railways, self-adjusting systems},
  pages        = {739--744},
  title        = {Guideline for the dependability-oriented design of self-optimizing systems},
  doi          = {10.1109/INDIN.2010.5549490},
  year         = {2010},
}

@article{46411,
  abstract     = {The paper presents a framework to optimise the design of work roll based on the cooling performance. The framework develops meta-models from a set of finite element analyses (FEA) of the roll cooling. A design of experiment technique is used to identify the FEA runs. The research also identifies sources of uncertainties in the design process. A robust evolutionary multi-objective evaluation technique is applied to the design optimisation in constrained problems with real life uncertainty. The approach handles uncertainties associated both with design variables and fitness functions. Constraints violation within the neighbourhood of a design is considered as part of a measurement for degree of feasibility and robustness of a solution.},
  author       = {Azene, Y. T. and Roy, R. and Farrugia, D. and Onisa, C. and Mehnen, J. and Trautmann, Heike},
  issn         = {1755-5817},
  journal      = {CIRP Journal of Manufacturing Science and Technology},
  keywords     = {Roll cooling design, Uncertainty, Design optimisation, Multi-objective optimisation, Constraint in design},
  number       = {4},
  pages        = {290--298},
  title        = {Work roll cooling system design optimisation in presence of uncertainty and constrains},
  doi          = {10.1016/j.cirpj.2010.06.001},
  volume       = {2},
  year         = {2010},
}

@inproceedings{37040,
  abstract     = {Refinement of untimed TLM models into a timed HW/SW platform is a step by step design process which is a trade-off between timing accuracy of the used models and correct estimation of the final timing performance. The use of an RTOS on the target platform is mandatory in the case real-time properties must be guaranteed. Thus, the question is when the RTOS must be introduced in this step by step refinement process. This paper proposes a four-level RTOS-aware refinement methodology that, starting from an untimed TLM SystemC description of the whole system, progressively introduce HW/SW partitioning, timing, device driver and RTOS functionalities, till to obtain an accurate model of the final platform, where SW tasks run upon an RTOS hosted by QEMU and HW components are modeled by cycle accurate TLM descriptions. Each refinement level allows the designer to estimate more and more accurate timing properties, thus anticipating design decisions without being constrained to leave timing analysis to the final step of the refinement. The effectiveness of the methodology has been evaluated in the design of two complex platforms.},
  author       = {Becker, Markus and Di Guglielmo, Giuseppe and Fummi, Franco and Müller, Wolfgang and Pravadelli, Graziano and Xie, Tao},
  booktitle    = {Proceedings of DATE'10},
  keywords     = {Timing, Hardware, Operating systems, Process design, Accuracy, Standards development, Context modeling, Real time systems, Communication channels, Microprogramming},
  location     = {Dresden},
  publisher    = {IEEE},
  title        = {{RTOS}-Aware Refinement for {TLM2.0}-based {HW/SW} Design},
  doi          = {10.1109/DATE.2010.5456965},
  year         = {2010},
}

@inproceedings{37046,
  abstract     = {In this article, we present a flexible simulation environment for embedded real-time software refinement by a mixed level cosimulation. For this, we combine the native speed of an abstract real-time operating system (RTOS) model in SystemC with dynamic binary translation for fast Instruction Set Simulation (ISS) by QEMU. In order to support stepwise RTOS software refinement from system level to the target software, each task can be separately migrated between the native execution and the ISS. By adapting the dynamic binary translation approach to an efficient but yet very accurate synchronization scheme the overhead of QEMU user mode execution is only factor two compared to native SystemC. Furthermore, the simulation speed increases almost linearly according to the utilization of the task set abstracted by the native execution. Hereby, the simulation time can be considerably reduced by cosimulating just a subset of tasks on QEMU.},
  author       = {Becker, Markus and Zabel, Henning and Müller, Wolfgang},
  editor       = {Kleinjohann, L. and Kleinjohann, B.},
  isbn         = {978-3-642-15233-7},
  keywords     = {Application Programming Interface, User Mode, Kernel Space, System Level Design, Mixed Level},
  publisher    = {Springer Verlag},
  title        = {A Mixed Level Simulation Environment for Stepwise {RTOS} Software Refinement},
  internal-note = {review: booktitle is missing but required for @inproceedings; the DOI prefix 10.1007/978-3-642-15234-4 suggests the DIPES/BICC 2010 Springer proceedings -- verify and add},
  doi          = {10.1007/978-3-642-15234-4_15},
  year         = {2010},
}

@inproceedings{37039,
  abstract     = {Refinement of untimed TLM models into a timed HW/SW platform is a step by step design process which is a trade-off between timing accuracy of the used models and correct estimation of the final timing performance. The use of an RTOS on the target platform is mandatory in the case real-time properties must be guaranteed. Thus, the question is when the RTOS must be introduced in this step by step refinement process. This paper proposes a four-level RTOS-aware refinement methodology that, starting from an untimed TLM SystemC description of the whole system, progressively introduce HW/SW partitioning, timing, device driver and RTOS functionalities, till to obtain an accurate model of the final platform, where SW tasks run upon an RTOS hosted by QEMU and HW components are modeled by cycle accurate TLM descriptions. Each refinement level allows the designer to estimate more and more accurate timing properties, thus anticipating design decisions without being constrained to leave timing analysis to the final step of the refinement. The effectiveness of the methodology has been evaluated in the design of two complex platforms.},
  author       = {Becker, Markus and Di Guglielmo, Giuseppe and Fummi, Franco and Müller, Wolfgang and Pravadelli, Graziano and Xie, Tao},
  booktitle    = {Proceedings of DATE'10},
  keywords     = {Timing, Hardware, Operating systems, Process design, Accuracy, Standards development, Context modeling, Real time systems, Communication channels, Microprogramming},
  location     = {Dresden},
  publisher    = {IEEE},
  title        = {{RTOS}-Aware Refinement for {TLM2.0}-based {HW/SW} Design},
  internal-note = {review: exact duplicate of entry 37040 -- consolidate citations on one key and remove the other entry},
  doi          = {10.1109/DATE.2010.5456965},
  year         = {2010},
}

@inproceedings{24065,
  author       = {Pottebaum, Jens and Japs, Anna Maria and Prödel, Stephan and Koch, Rainer},
  booktitle    = {ISCRAM 2010 -- 7th International Conference on Information Systems for Crisis Response and Management},
  editor       = {French, Simon and Tomaszewski, Brian and Zobel, Chris},
  keywords     = {Command and control process, Command and control systems, Design and modeling, Domain ontologies, Emergency response, Fire extinguishers, Fire protection, Heterogeneous domains, Information analysis, Information sharing, Information systems, Interoperability, Ontology language, Semantic technologies, Semantic Web, Semantics},
  title        = {Design and modeling of a domain ontology for fire protection},
  year         = {2010},
}

@article{34563,
  abstract     = {UML has been widely accepted by the software community for several years. As electronic systems design can no longer be seen as an isolated hardware design activity, UML becomes of significant interest as a unification language for systems description combining both HW and SW components. This article provides a comprehensive view of the UML applied to System-on-Chip (SoC) and hardware-related embedded systems design. The modeling concepts in the UML language are first introduced, including major diagrams for the representation of the behavior and the structure of systems. The principles behind application specific UML customizations (UML profiles) are summarized, and several examples relevant for SoC design are given, such as the SysML (System Modeling Language) and the SoC Profile. Thereafter, various approaches associating UML with existing HW/SW design languages are presented. Beyond language aspects, the article addresses the question of UML-based design flows, and shows how UML can be applied concretely to the development of electronic-based systems. The current situation about tool support constitutes the last focus of the article. In particular, we show how UML tools can be combined with well-known simulation environments, such as MATLAB.},
  author       = {Vanderperren, Yves and Müller, Wolfgang and Dehaene, Wim},
  journal      = {Design Automation for Embedded Systems},
  keywords     = {UML, SysML, Model-based design, System specification, Modelling languages},
  pages        = {261--292},
  publisher    = {Springer-Verlag},
  title        = {{UML} for Electronic Systems Design – A Comprehensive Overview},
  doi          = {10.1007/s10617-008-9028-9},
  volume       = {12},
  year         = {2008},
}

@article{34564,
  abstract     = {To provide user interfaces for a rich set of devices and interaction modalities, we follow a model-based development methodology. We devised an architecture which deploys user interfaces specified as dialogue models with abstract interaction objects and allows context-based adaptations by means of an external transcoding process. For the validation of the applicability of this methodology for developing usable multimodal multi-device systems, we present two case studies based on proof-of-concept implementations and assessed them with a large set of established design principles and different types of modality cooperation.},
  author       = {Schäfer, Robbie and Müller, Wolfgang},
  journal      = {Journal on Multimodal User Interfaces},
  keywords     = {Interaction architecture, Abstract interaction objects, Dialogue model, Transformations, Multimodality, Multi-device, Design principles},
  number       = {1},
  pages        = {25--41},
  publisher    = {Springer-Verlag},
  title        = {Assessment of a Multimodal Interaction and Rendering System against Established Design Principles},
  doi          = {10.1007/s12193-008-0003-3},
  volume       = {2},
  year         = {2008},
}

@article{5658,
  abstract     = {Email communication is encumbered with a mass of email messages which their recipients have neither requested nor require. Even worse, the impacts of these messages are far from being simply an annoyance, as they also involve economic damage. This manuscript examines the resource ``email addresses'', which is vital for any potential bulk mailer and spammer. Both a methodology and a honeypot conceptualization for implementing an empirical analysis of the usage of email addresses placed on the Internet are proposed here. Their objective is to assess, on a quantitative basis, the extent of the current harassment and its development over time. This ``framework'' is intended to be extensible to measuring the effectiveness of address-obscuring techniques. The implementation of a pilot honeypot is described, which led to key findings, some of them being: (1) Web placements attract more than two-thirds (70\%) of all honeypot spam emails, followed by newsgroup placements (28.6\%) and newsletter subscriptions (1.4\%), (2) the proportions of spam relating to the email addresses' top-level domain can be statistically assumed to be uniformly distributed, (3) More than 43\% of addresses on the web have been abused, whereas about 27\% was the case for addresses on newsgroups and only about 4\% was the case for addresses used for a newsletter subscription, (4) Regarding the development of email addresses' attractiveness for spammers over time, the service ``web sites'' features a negative linear relationship, whereas the service ``Usenet'' shows a negative exponential relationship. (5) Only 1.54\% of the spam emails showed an interrelation between the topic of the spam email and that of the location where the recipient's address was placed, so that spammers are assumed to send their emails in a ``context insensitive'' manner. The results of the empirical analysis motivate the need for the protection of email addresses through obscuration. We analyze this need by formulating requirements for address obscuring techniques and we reveal to which extent today's most relevant approaches fulfill these requirements.},
  author       = {Schryen, Guido},
  journal      = {Computers \& Security},
  keywords     = {Address-obfuscating techniques, email, empirical analysis, honeypot, security by design, security by obscurity, spam},
  number       = {5},
  pages        = {361--372},
  publisher    = {Elsevier},
  title        = {The Impact that Placing Email Addresses on the Internet has on the Receipt of Spam -- An Empirical Analysis},
  volume       = {26},
  year         = {2007},
}

