@inproceedings{11724,
  abstract     = {{In this paper we present a novel vehicle tracking method based on multi-stage Kalman filtering of GPS and IMU sensor data. After individual Kalman filtering of the GPS and IMU measurements, the estimates of the vehicle's orientation are combined in an optimal manner to improve robustness towards drift errors. The tracking algorithm incorporates the estimation of time-variant covariance parameters by means of an iterative block Expectation-Maximization algorithm to account for time-variant driving conditions and measurement quality. The proposed system is compared to an interacting multiple model (IMM) approach and achieves improved localization accuracy at lower computational complexity. Furthermore, we show how the joint parameter estimation and localization can be conducted with streaming input data, enabling vehicles to be tracked in a real driving environment.}},
  author       = {{Bevermeier, Maik and Peschke, Sven and Haeb-Umbach, Reinhold}},
  booktitle    = {{IEEE 69th Vehicular Technology Conference (VTC 2009 Spring)}},
  keywords     = {{computational complexity, expectation-maximisation algorithm, Global Positioning System, inertial measurement unit, inertial navigation, interacting multiple model, iterative block expectation-maximization algorithm, Kalman filters, multi-stage Kalman filter, parameter estimation, road vehicles, vehicle positioning, vehicle tracking}},
  pages        = {{1--5}},
  title        = {{{Joint Parameter Estimation and Tracking in a Multi-Stage Kalman Filter for Vehicle Positioning}}},
  doi          = {{10.1109/VETECS.2009.5073634}},
  year         = {{2009}},
}

@article{34564,
  abstract     = {{To provide user interfaces for a rich set of devices and interaction modalities, we follow a model-based development methodology. We devised an architecture which deploys user interfaces specified as dialogue models with abstract interaction objects and allows context-based adaptations by means of an external transcoding process. To validate the applicability of this methodology for developing usable multimodal multi-device systems, we present two case studies based on proof-of-concept implementations and assess them against a large set of established design principles and different types of modality cooperation.}},
  author       = {{Schäfer, Robbie and Müller, Wolfgang}},
  journal      = {{Journal on Multimodal User Interfaces}},
  keywords     = {{Interaction architecture, Abstract interaction objects, Dialogue model, Transformations, Multimodality, Multi-device, Design principles}},
  number       = {{1}},
  pages        = {{25--41}},
  publisher    = {{Springer-Verlag}},
  title        = {{{Assessment of a Multimodal Interaction and Rendering System against Established Design Principles}}},
  doi          = {{10.1007/s12193-008-0003-3}},
  volume       = {{2}},
  year         = {{2008}},
}

@article{17289,
  abstract     = {{Robots have to deal with an enormous amount of sensory stimuli. One solution for making sense of them is to enable a robot system to actively search for cues that help structure the information. Studies with infants reveal that parents support the learning process by modifying their interaction style depending on their child's developmental age. In our study, in which parents demonstrated everyday actions to their preverbal children (8-11 months old), our aim was to identify objective parameters for multimodal action modification. Our results reveal two action parameters that are modified in adult-child interaction: roundness and pace. Furthermore, we found that language has the power to help children structure action sequences through synchrony and emphasis. These insights are discussed with respect to the built-in attention architecture of a socially interactive robot, which enables it to understand demonstrated actions. Our algorithmic approach to automatically detecting the task structure in child-designed input demonstrates the potential impact of insights from developmental learning on robotics. The presented findings pave the way to automatically detecting when to imitate in a demonstration.}},
  author       = {{Rohlfing, Katharina and Fritsch, Jannik and Wrede, Britta and Jungmann, Tanja}},
  issn         = {{1568-5535}},
  journal      = {{Advanced Robotics}},
  keywords     = {{multi-modal motherese, child-directed input, motionese, learning mechanisms}},
  number       = {{10}},
  pages        = {{1183--1199}},
  publisher    = {{VSP BV}},
  title        = {{{How can multimodal cues from child-directed interaction reduce learning complexity in robots?}}},
  doi          = {{10.1163/156855306778522532}},
  volume       = {{20}},
  year         = {{2006}},
}

@inproceedings{2421,
  abstract     = {{In contrast to processors, current reconfigurable devices totally lack programming models that would allow for device-independent compilation and forward compatibility. The key to overcoming this limitation is hardware virtualization. In this paper, we resort to a macro-pipelined execution model to achieve hardware virtualization for data-streaming applications. As a hardware implementation we present a hybrid multi-context architecture that attaches a coarse-grained reconfigurable array to a host CPU. A co-simulation framework enables cycle-accurate simulation of the complete architecture. As a case study we map an FIR filter to our virtualized hardware model and evaluate different designs. We discuss the impact of the number of contexts and of context state on the speedup and the CPU load.}},
  author       = {{Enzler, Rolf and Plessl, Christian and Platzner, Marco}},
  booktitle    = {{Proceedings of the International Conference on Field Programmable Logic and Applications (FPL)}},
  keywords     = {{Zippy, multi-context, FPGA}},
  pages        = {{151--160}},
  publisher    = {{Springer}},
  series       = {{Lecture Notes in Computer Science}},
  title        = {{{Virtualizing Hardware with Multi-Context Reconfigurable Arrays}}},
  doi          = {{10.1007/b12007}},
  volume       = {{2778}},
  year         = {{2003}},
}

