@phdthesis{4443,
  abstract     = {{	
Mit welchen Erwartungen und Anforderungen sehen sich Studierende im Hochschulalltag konfrontiert? Wie muss nach Ansicht der Studierenden gehandelt werden, um das Studium "erfolgreich" zu bewältigen? Wie laufen die Auswahl unterschiedlicher Lernaktivitäten im Studium sowie die Allokation von Ressourcen im Detail ab? Diese und weitere Fragen sind vor allem im Zusammenhang mit der Debatte um die Bologna-Reform stärker in den Mittelpunkt gerückt. Denn während modularisierte Curricula und Credit-Point-Systeme oft nach administrativ-organisatorischen Gesichtspunkten eingeführt wurden, hat die Gestaltung der Studienstrukturen auf Programmebene zwangsweise Auswirkungen auf das Studienhandeln - und damit auch auf das Lernen der Studierenden. Es gibt viele Vermutungen über die "Wirkungen" von Bologna auf das Studieren: Die Rede ist von Verschulung, Entwissenschaftlichung und Arbeitsüberlastung und Studierende stehen im Verdacht, ihren Studienpfad eher im Sinne einer Credit-Point-Jagd denn interessengeleitet zu gestalten. In der Regel bleibt es dabei jedoch bei anekdotischen Beobachtungen - es fehlt bislang an gesicherten Erkenntnissen über Zusammenhänge zwischen Bologna-konformen Studienkontexten und dem Handeln Studierender. Denn obschon Studierende seit langem Gegenstand der Forschung sind, ist bislang wenig darüber bekannt, wie sie den langfristigen Bildungsprozess eines ganzen Studiums gestalten. Stattdessen wurde vorwiegend das Lernen im engeren Sinne, das so genannte "Classroom Learning" untersucht.

Diejenigen, die Studienprogramme nach didaktischen Gesichtspunkten gestalten wollen müssen wissen, wie Studierende mit unterschiedlichen Studienstrukturen umgehen. Denn nur so ist eine planvolle Programmgestaltung im Sinne pädagogischer Zielsetzungen und didaktischer Prinzipien möglich. Die vorliegende Arbeit nimmt diese Problemstellung auf und untersucht, welche Zielvorstellungen und Handlungsstrategien Studierende bei der Bewältigung ihres Studienalltags in unterschiedlichen Bologna-konformen Studienprogrammen entwickeln. Im Einzelnen wird gefragt, (1) welche Vorstellungen, Wahrnehmungen und Handlungslogiken das Handeln Studierender in unterschiedlichen Studienprogrammen prägen und (2) welche programmspezifischen Kontextbedingungen die Herausbildung dieser handlungsleitenden Charakteristika beeinflussen. Ausgehend von diesen beiden Fragestellungen wird zunächst ein theoretisch fundierter Analyserahmen für Studienprogramme entwickelt. Darauf aufbauend werden verschiedene Studienprogramme empirisch untersucht, um schliesslich Design-Prinzipien für die Gestaltung von Studienprogrammen zu formulieren.

Es wird nicht davon ausgegangen, dass Studienstrukturen das Handeln Studierender im Sinne eines kausalen Wirkungszusammenhangs bestimmen. Vielmehr wird gefragt, was die Studierenden mit den Strukturen, auf die sie treffen, machen. Studienprogramme werden als kulturelle Einheiten verstanden, innerhalb derer sich spezifische handlungsleitende Vorstellungen und Normen entwickeln. Diese impliziten Regeln des Studienhandelns - das so genannte "Hidden Curriculum" - und vor allem auch die dahinterliegenden Konstruktionsprozesse aufzudecken, ist Ziel dieser Arbeit.}},
  author       = {{Jenert, Tobias}},
  keywords     = {{Studierverhalten, Hochschuldidaktik, Lernpsychologie, Kulturpsychologie, Bologna-Prozess, Curriculumentwicklung, Hochschulbildung, Study programme, Bologna-process, educational development, cultural psychology, higher education, curriculum development}},
  school       = {{Universität St. Gallen}},
  title        = {{{Studienprogramme als didaktische Gestaltungs- und Untersuchungseinheit: Theoretische Grundlegung und empirische Analyse}}},
  year         = {{2012}},
}

@inproceedings{4486,
  author       = {{Gebhardt, Anja and Jenert, Tobias}},
  keywords     = {{learning culture, social interaction}},
  location     = {{Belgrad, Serbien}},
  title        = {{{How to Assess and Study the Cultural Dimension of Social Interactions in Higher Education Institutions (HEI)}}},
  year         = {{2012}},
}

@article{4706,
  abstract     = {{Purpose – The purpose of this paper is to show how to employ complex event processing (CEP) for the observation and management of business processes. It proposes a conceptual architecture of BPM event producer, processor, and consumer and describes technical implications for the application with standard software in a perfect order scenario. Design/methodology/approach – The authors discuss business process analytics as the technological background. The capabilities of CEP in a BPM context are outlined an architecture design is proposed. A sophisticated proof-of-concept demonstrates its applicability. Findings – The results overcome the separation and data latency issues of process controlling, monitoring, and simulation. Distinct analyses of past, present, and future blur into a holistic real-time approach. The authors highlight the necessity for configurable event producer in BPM engines, process event support in CEP engines, a common process event format, connectors to visualizers, notifiers and return channels to the BPM engine. Research limitations/implications – Further research will thoroughly evaluate the approach in a variety of business settings. New concepts and standards for the architecture's building blocks will be needed to improve maintainability and operability. Practical implications – Managers learn how CEP can yield insights into business processes' operations. The paper illustrates a path to overcome inflexibility, latency, and missing feedback mechanisms of current process modeling and control solutions. Software vendors might be interested in the conceptualization and the described needs for further development. Originality/value – So far, there is no commercial CEP-based BPM solution which facilitates a round trip from insight to action as outlines. 
As major software vendors have begun developing solutions (BPM/BPA solutions), this paper will stimulate a debate between research and practice on suitable design and technology.}},
  author       = {{Janiesch, Christian and Matzner, Martin and Müller, Oliver}},
  isbn         = {{1020120096}},
  issn         = {{1463-7154}},
  journal      = {{Business Process Management Journal}},
  keywords     = {{Architecture, Business activity monitoring, Business process management, Business process re-engineering, Complex event processing, Computer software, Standard software}},
  number       = {{4}},
  pages        = {{625--643}},
  title        = {{{Beyond process monitoring: A proof-of-concept of event-driven business activity management}}},
  doi          = {{10.1108/14637151211253765}},
  year         = {{2012}},
}

@inproceedings{9783,
  abstract     = {{To optimize the ultrasound irradiation for cavitation based ultrasound applications like sonochemistry or ultrasound cleaning, the correlation between cavitation intensity and the resulting effect on the process is of interest. Furthermore, changing conditions like temperature and pressure result in varying acoustic properties of the liquid. That might necessitate an adaption of the ultrasound irradiation. To detect such changes during operation, process monitoring is desired. Labor intensive processes, that might be carried out for several hours, also require process monitoring to increase their reliability by detection of changes or malfunctions during operation. In some applications cavitation detection and monitoring can be achieved by the application of sensors in the sound field. Though the application of sensors is possible, this necessitates modifications on the system and the sensor might disturb the sound field. In other applications harsh, process conditions prohibit the application of sensors in the sound field. Therefore alternative techniques for cavitation detection and monitoring are desired. The applicability of an external microphone and a self-sensing ultrasound transducer for cavitation detection were experimentally investigated. Both methods were found to be suitable and easily applicable.}},
  author       = {{Bornmann, Peter and Hemsel, Tobias and Sextro, Walter and Maeda, Takafumi and Morita, Takeshi}},
  booktitle    = {{Ultrasonics Symposium (IUS), 2012 IEEE International}},
  issn         = {{1948-5719}},
  keywords     = {{cavitation, chemical reactors, microphones, process monitoring, reliability, ultrasonic applications, ultrasonic waves, acoustic properties, cavitation based ultrasound applications, cavitation intensity, change detection reliability, external microphone, malfunction detection reliability, nonperturbing cavitation detection, nonperturbing cavitation monitoring, self-sensing ultrasound transducer, sonochemical reactors, sonochemistry, ultrasound cleaning, ultrasound irradiation, Acoustics, Liquids, Monitoring, Sensors, Sonar equipment, Transducers, Ultrasonic imaging}},
  pages        = {{1141--1144}},
  title        = {{{Non-perturbing cavitation detection / monitoring in sonochemical reactors}}},
  doi          = {{10.1109/ULTSYM.2012.0284}},
  year         = {{2012}},
}

@article{17225,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics}},
  note         = {{Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@comment{Entry 17428 is an exact duplicate of entry 17225 above -- consider removing one of the two.}
@article{17428,
  abstract     = {{How is communicative gesture behavior in robots perceived by humans? Although gesture is crucial in social interaction, this research question is still largely unexplored in the field of social robotics. Thus, the main objective of the present work is to investigate how gestural machine behaviors can be used to design more natural communication in social robots. The chosen approach is twofold. Firstly, the technical challenges encountered when implementing a speech-gesture generation model on a robotic platform are tackled. We present a framework that enables the humanoid robot to flexibly produce synthetic speech and co-verbal hand and arm gestures at run-time, while not being limited to a predefined repertoire of motor actions. Secondly, the achieved flexibility in robot gesture is exploited in controlled experiments. To gain a deeper understanding of how communicative robot gesture might impact and shape human perception and evaluation of human-robot interaction, we conducted a between-subjects experimental study using the humanoid robot in a joint task scenario. We manipulated the non-verbal behaviors of the robot in three experimental conditions, so that it would refer to objects by utilizing either (1) unimodal (i.e., speech only) utterances, (2) congruent multimodal (i.e., semantically matching speech and gesture) or (3) incongruent multimodal (i.e., semantically non-matching speech and gesture) utterances. Our findings reveal that the robot is evaluated more positively when non-verbal behaviors such as hand and arm gestures are displayed along with speech, even if they do not semantically match the spoken utterance.}},
  author       = {{Salem, Maha and Kopp, Stefan and Wachsmuth, Ipke and Rohlfing, Katharina and Joublin, Frank}},
  issn         = {{1875-4805}},
  journal      = {{International Journal of Social Robotics}},
  note         = {{Special Issue on Expectations, Intentions, and Actions}},
  keywords     = {{Social Human-Robot Interaction, Multimodal Interaction and Conversational Skills, Robot Companions and Social Robots, Non-verbal Cues and Expressiveness}},
  number       = {{2}},
  pages        = {{201--217}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Generation and evaluation of communicative robot gesture}}},
  doi          = {{10.1007/s12369-011-0124-9}},
  volume       = {{4}},
  year         = {{2012}},
}

@inproceedings{1120,
  abstract     = {{SCM is a simple, modular and flexible system for web monitoring and customer interaction management. In our view, its main advantages are the following: It is completely web based. It combines all technologies, data, software agents and human agents involved in the monitoring and customer interaction process. It can be used for messages written in any natural language. Although the prototype of SCM is designed for classifying and processing messages about mobile-phone related problems in social networks, SCM can easily be adapted to other text types such as discussion board posts, blogs or emails. Unlike comparable systems, SCM uses linguistic technologies to classify messages and recognize paraphrases of product names. For two reasons, product name paraphrasing plays a major role in SCM: First, product names typically have many, sometimes hundreds or thousands of intralingual paraphrases. Secondly, product names have interlingual paraphrases: The same products are often called or spelt differently in different countries and/or languages. By mapping product name variants to an international canonical form, SCM allows for answering questions like Which statements are made about this mobile phone in which languages/in which social networks/in which countries/...? The SCM product name paraphrasing engine is designed in such a way that standard variants are assigned automatically, regular variants are assigned semiautomatically and idiosyncratic variants can be added manually. With this and similar features we try to realize our philosophy of simplicity, modularity and flexibility: Whatever can be done automatically is done automatically. But manual intervention is always possible and easy and it does not conflict in any way with the automatic functions of SCM.}},
  author       = {{Schuster, Jörg and Lee, Yeong Su and Kobothanassi, Despina and Bargel, Matthias and Geierhos, Michaela}},
  booktitle    = {{International Conference on Information Society (i-Society 2011)}},
  isbn         = {{978-1-61284-148-9}},
  keywords     = {{Social Media Business Integration, Contact Center Application Support, Monitoring Social Conversations, Social Customer Interaction Management, Monitoring, Software Agents}},
  location     = {{London, UK}},
  pages        = {{153--158}},
  publisher    = {{IEEE}},
  title        = {{{SCM - A Simple, Modular and Flexible Customer Interaction Management System}}},
  year         = {{2011}},
}

@article{1125,
  abstract     = {{Since customers first share their problems with a social networking community before directly addressing a company, social networking sites such as Facebook, Twitter, MySpace or Foursquare will be the interface between customer and company. For this reason, it is assumed that social networks will evolve into a common communication channel – not only between individuals but also between customers and companies. However, social networking has not yet been integrated into customer interaction management (CIM) tools. In general, a CIM application is used by the agents in a contact centre while communicating with the customers. Such systems handle communication across multiple different channels, such as e-mail, telephone, Instant Messaging, letter etc. What we do now is to integrate social networking into CIM applications by adding another communication channel. This allows the company to follow general trends in customer opinions on the Internet, but also record two-sided communication for customer service management and the company’s response will be delivered through the customer’s preferred social networking site.}},
  author       = {{Geierhos, Michaela}},
  issn         = {{1798-2340}},
  journal      = {{Journal of Advances in Information Technology}},
  keywords     = {{Social Media Business Integration, Multichannel Customer Interaction Management, Contact Centre Application Support}},
  number       = {{4}},
  pages        = {{222--233}},
  publisher    = {{Engineering and Technology Publishing (ETPub)}},
  title        = {{{Customer Interaction 2.0: Adopting Social Media as Customer Service Channel}}},
  doi          = {{10.4304/jait.2.4.222-233}},
  volume       = {{2}},
  year         = {{2011}},
}

@inproceedings{4707,
  author       = {{Janiesch, Christian and Matzner, Martin and Müller, Oliver}},
  booktitle    = {{Lecture Notes in Computer Science}},
  isbn         = {{978-3-642-23058-5}},
  issn         = {{0302-9743}},
  keywords     = {{Reference architecture, business activity management, business process analytics, business process management, complex event processing}},
  title        = {{{A blueprint for event-driven business activity management}}},
  doi          = {{10.1007/978-3-642-23059-2_4}},
  year         = {{2011}},
}

@article{17233,
  abstract     = {{It has been proposed that the design of robots might benefit from interactions that are similar to caregiver–child interactions, which is tailored to children’s respective capacities to a high degree. However, so far little is known about how people adapt their tutoring behaviour to robots and whether robots can evoke input that is similar to child-directed interaction. The paper presents detailed analyses of speakers’ linguistic and non-linguistic behaviour, such as action demonstration, in two comparable situations: In one experiment, parents described and explained to their nonverbal infants the use of certain everyday objects; in the other experiment, participants tutored a simulated robot on the same objects. The results, which show considerable differences between the two situations on almost all measures, are discussed in the light of the computer-as-social-actor paradigm and the register hypothesis.}},
  author       = {{Fischer, Kerstin and Foth, Kilian and Rohlfing, Katharina and Wrede, Britta}},
  issn         = {{1572-0381}},
  journal      = {{Interaction Studies}},
  keywords     = {{human–robot interaction (HRI), social communication, register theory, motionese, robotese, child-directed speech (CDS), motherese, mindless transfer, computers-as-social-actors}},
  number       = {{1}},
  pages        = {{134--161}},
  publisher    = {{John Benjamins Publishing Company}},
  title        = {{{Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot}}},
  doi          = {{10.1075/is.12.1.06fis}},
  volume       = {{12}},
  year         = {{2011}},
}

@article{17246,
  author       = {{Nomikou, Iris and Rohlfing, Katharina}},
  issn         = {{1943-0612}},
  journal      = {{IEEE Transactions on Autonomous Mental Development}},
  keywords     = {{acoustic packaging, mother-child interaction, social learning, multimodal grounding in input, ecology of interactions, synchrony}},
  number       = {{2}},
  pages        = {{113--128}},
  publisher    = {{Institute of Electrical \& Electronics Engineers (IEEE)}},
  title        = {{{Language Does Something: Body Action and Language in Maternal Input to Three-Month-Olds}}},
  doi          = {{10.1109/TAMD.2011.2140113}},
  volume       = {{3}},
  year         = {{2011}},
}

@inproceedings{17430,
  abstract     = {{Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed increased future contact intentions than when the robot gave instructions without using gestures. Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.}},
  author       = {{Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, Frank}},
  booktitle    = {{Social Robotics}},
  editor       = {{Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}},
  isbn         = {{978-3-642-25503-8}},
  keywords     = {{Multimodal Interaction and Conversational Skills, Anthropomorphism, Non-verbal Cues and Expressiveness}},
  pages        = {{31--41}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}}},
  doi          = {{10.1007/978-3-642-25504-5_4}},
  volume       = {{7072}},
  year         = {{2011}},
}

@inproceedings{17244,
  abstract     = {{Robots interacting with humans need to understand actions and make use of language in social interactions. Research on infant development has shown that language helps the learner to structure visual observations of action. This acoustic information typically in the form of narration overlaps with action sequences and provides infants with a bottom-up guide to find structure within them. This concept has been introduced as acoustic packaging by Hirsh-Pasek and Golinkoff. We developed and integrated a prominence detection module in our acoustic packaging system to detect semantically relevant information linguistically highlighted by the tutor. Evaluation results on speech data from adult-infant interactions show a significant agreement with human raters. Furthermore a first approach based on acoustic packages which uses the prominence detection results to generate acoustic feedback is presented. Index Terms: prominence, multimodal action segmentation, human robot interaction, feedback}},
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  booktitle    = {{Interspeech 2011 (12th Annual Conference of the International Speech Communication Association)}},
  keywords     = {{Feedback, Human Robot Interaction, Prominence, Multimodal Action Segmentation}},
  pages        = {{3105--3108}},
  title        = {{{Using Prominence Detection to Generate Acoustic Feedback in Tutoring Scenarios}}},
  year         = {{2011}},
}

@inproceedings{17245,
  author       = {{Schillingmann, Lars and Wagner, Petra and Munier, Christian and Wrede, Britta and Rohlfing, Katharina}},
  issn         = {{1662-5188}},
  keywords     = {{Prominence, Multimodal Action Segmentation, Feedback, Color Saliency, Human Robot Interaction}},
  title        = {{{Acoustic Packaging and the Learning of Words}}},
  doi          = {{10.3389/conf.fncom.2011.52.00020}},
  year         = {{2011}},
}

@inproceedings{17242,
  abstract     = {{Previous work has shown that gestural behaviors affect anthropomorphic inferences about artificial communicators such as virtual agents. In an experiment with a humanoid robot, we investigated to what extent gesture would affect anthropomorphic inferences about the robot. Particularly, we examined the effects of the robot's hand and arm gestures on the attribution of typically human traits, likability of the robot, shared reality, and future contact intentions after interacting with the robot. For this, we manipulated the non-verbal behaviors of the humanoid robot in three experimental conditions: (1) no gesture, (2) congruent gesture, and (3) incongruent gesture. We hypothesized higher ratings on all dependent measures in the two gesture (vs. no gesture) conditions. The results confirm our predictions: when the robot used gestures during interaction, it was anthropomorphized more, participants perceived it as more likable, reported greater shared reality with it, and showed increased future contact intentions than when the robot gave instructions without using gestures. Surprisingly, this effect was particularly pronounced when the robot's gestures were partly incongruent with speech. These findings show that communicative non-verbal behaviors in robotic systems affect both anthropomorphic perceptions and the mental models humans form of a humanoid robot during interaction.}},
  author       = {{Salem, Maha and Eyssel, Friederike Anne and Rohlfing, Katharina and Kopp, Stefan and Joublin, Frank}},
  booktitle    = {{Social Robotics}},
  editor       = {{Mutlu, B. and Bartneck, C. and Ham, J. and Evers, V. and Kanda, T.}},
  isbn         = {{978-3-642-25503-8}},
  keywords     = {{Multimodal Interaction and Conversational Skills, Anthropomorphism, Non-verbal Cues and Expressiveness}},
  pages        = {{31--41}},
  publisher    = {{Springer Science + Business Media}},
  title        = {{{Effects of gesture on the perception of psychological anthropomorphism: A case study with a humanoid robot}}},
  doi          = {{10.1007/978-3-642-25504-5_4}},
  volume       = {{7072}},
  year         = {{2011}},
}

@article{5195,
  abstract     = {{This article analyses 336 German venture capital transactions from 1990 to 2005 and seeks to determine why selected financial securities differ across deals. We find that a broad array of financial instruments is used, covering straight equity, mezzanine and debt‐like securities. Based on the chosen financial securities’ upside potential and downside protection characteristics, we provide an explanation for the differing use of these securities. Our results show that investors’ deal experience, adverse selection risks and economic prospects in the public equity market influence the selection of financial securities. }},
  author       = {{Hartmann-Wendels, Thomas and Keienburg, Georg and Sievers, Sönke}},
  journal      = {{European Financial Management}},
  note         = {{VHB-JOURQUAL 4 Ranking: B}},
  keywords     = {{venture capital, capital structure, contract theory, deal experience}},
  number       = {{3}},
  pages        = {{464--499}},
  publisher    = {{Wiley Online Library}},
  title        = {{{Adverse selection, investor experience and security choice in venture capital finance: evidence from Germany}}},
  doi          = {{10.1111/j.1468-036X.2010.00568.x}},
  volume       = {{17}},
  year         = {{2011}},
}

@inproceedings{23858,
  abstract     = {{A large proportion of plastics today is compounded, which means the process from refining a raw material to the processable material. For this process compounding extruders are used which mostly involve tightly intermeshing, co-rotating twin screw extruders. These extruders consist of two closely spaced screws which rotate in the same direction and convey the raw material to the screw tip. These screws are surrounded by several barrel modules which heat or cool the material. As the whole design of the machine is modularly arranged the process behavior of a twin screw extruder depends for the main part on the arrangement of the screw and the barrel elements. Until today this arrangement and process optimization is conducted by experienced engineers and with the help of trial-and-error methods. Furthermore, theoretical models are used with which the behavior of the extruder is estimated. As these models are mostly very complex they are only made available with the realization in different software projects. One of the tools is called SIGMA. Within this paper SIGMA is introduced as a software to optimize a twin screw extruder. SIGMA supports the engineer already in the early stages of the extruder arrangement.}},
  author       = {{Kretzschmar, Nils and Schöppner, Volker}},
  booktitle    = {{Proceedings of the 2010 Summer Computer Simulation Conference}},
  keywords     = {{process optimization, polymer engineering, compounding, twin screw extruder, simulation}},
  pages        = {{133--140}},
  publisher    = {{Society for Computer Simulation International}},
  title        = {{{Simulating Tightly Intermeshing, Co-Rotating Twin Screw Extruders with SIGMA}}},
  year         = {{2010}},
}

@inproceedings{37040,
  abstract     = {{Refinement of untimed TLM models into a timed HW/SW platform is a step by step design process which is a trade-off between timing accuracy of the used models and correct estimation of the final timing performance. The use of an RTOS on the target platform is mandatory in the case real-time properties must be guaranteed. Thus, the question is when the RTOS must be introduced in this step by step refinement process. This paper proposes a four-level RTOS-aware refinement methodology that, starting from an untimed TLM SystemC description of the whole system, progressively introduce HW/SW partitioning, timing, device driver and RTOS functionalities, till to obtain an accurate model of the final platform, where SW tasks run upon an RTOS hosted by QEMU and HW components are modeled by cycle accurate TLM descriptions. Each refinement level allows the designer to estimate more and more accurate timing properties, thus anticipating design decisions without being constrained to leave timing analysis to the final step of the refinement. The effectiveness of the methodology has been evaluated in the design of two complex platforms.}},
  author       = {{Becker, Markus and Di Guglielmo, Giuseppe and Fummi, Franco and Müller, Wolfgang and Pravadelli, Graziano and Xie, Tao}},
  booktitle    = {{Proceedings of DATE’10}},
  keywords     = {{Timing, Hardware, Operating systems, Process design, Accuracy, Standards development, Context modeling, Real time systems, Communication channels, Microprogramming}},
  location     = {{Dresden}},
  publisher    = {{IEEE}},
  title        = {{{RTOS-Aware Refinement for TLM2.0-based HW/SW Design}}},
  doi          = {{10.1109/DATE.2010.5456965}},
  year         = {{2010}},
}

@inproceedings{37050,
  abstract     = {{The main obstacle for the wide acceptance of UML and SysML in the design of electronic systems is due to a major gap in the design flow between UML-based modeling and SystemC-based verification. To overcome this gap, we present an approach developed in the SATURN project which introduces UML profiles for the co-modeling of SystemC and C with code generation support in the context of the SysML tool suite ARTiSAN Studio®. We finally discuss the evaluation of the approach by two case studies.}},
  author       = {{Müller, Wolfgang and He, Da and Mischkalla, Fabian and Wegele, Arthur and Larkham, Adrian and Whiston, Paul and Penil, Pablo and Villar, Eugenio and Mitas, Nikolaos and Kritharidis, Dimitros and Azcarate, Florent and Carballeda, Manuel}},
  booktitle    = {{Proceedings of the IEEE Computer Society Annual Symposium on VLSI}},
  keywords     = {{Communicate Sequential Process, Virtual Platform, Smart Camera, Synchronous Data Flow, Artisan Studio}},
  title        = {{{The SATURN Approach to SysML-based HW/SW Codesign}}},
  doi          = {{10.1007/978-94-007-1488-5_9}},
  year         = {{2010}},
}

@inproceedings{17253,
  author       = {{Vollmer, Anna-Lisa and Pitsch, Karola and Lohan, Katrin Solveig and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}},
  booktitle    = {{Development and Learning (ICDL), 2010 IEEE 9th International Conference on Development and Learning}},
  keywords     = {{tutoring interaction, social interaction, video signal processing, robot systems, paediatrics, neurophysiology, Learning, infant, feedback, biology computing, cognitive capabilities, cognition, children}},
  pages        = {{76--81}},
  title        = {{{Developing feedback: How children of different age contribute to a tutoring interaction with adults}}},
  year         = {{2010}},
}

