@article{15739,
  author = {{Azadi, Sam and Kühne, Thomas D.}},
  issn = {{2469-9950}},
  journal = {{Physical Review B}},
  pages = {{155103--5}},
  title = {{{Unconventional phase III of high-pressure solid hydrogen}}},
  doi = {{10.1103/physrevb.100.155103}},
  volume = {{100}},
  year = {{2019}},
}

@article{15740,
  author = {{Guc, Maxim and Kodalle, Tim and Kormath Madam Raghupathy, Ramya and Mirhosseini, Hossein and Kühne, Thomas D. and Becerril-Romero, Ignacio and Pérez-Rodríguez, Alejandro and Kaufmann, Christian A. and Izquierdo-Roca, Victor}},
  issn = {{1932-7447}},
  journal = {{The Journal of Physical Chemistry C}},
  pages = {{1285--1291}},
  title = {{{Vibrational Properties of RbInSe2: Raman Scattering Spectroscopy and First-Principle Calculations}}},
  doi = {{10.1021/acs.jpcc.9b08781}},
  volume = {{124}},
  year = {{2019}},
}

@article{15741,
  abstract = {{In many cyber–physical systems, we encounter the problem of remote state estimation of geographically distributed and remote physical processes. This paper studies the scheduling of sensor transmissions to estimate the states of multiple remote, dynamic processes. Information from the different sensors has to be transmitted to a central gateway over a wireless network for monitoring purposes, where typically fewer wireless channels are available than there are processes to be monitored. For effective estimation at the gateway, the sensors need to be scheduled appropriately, i.e., at each time instant one needs to decide which sensors have network access and which ones do not. To address this scheduling problem, we formulate an associated Markov decision process (MDP). This MDP is then solved using a Deep Q-Network, a recent deep reinforcement learning algorithm that is at once scalable and model-free. We compare our scheduling algorithm to popular scheduling algorithms such as round-robin and reduced-waiting-time, among others. Our algorithm is shown to significantly outperform these algorithms for many example scenarios.}},
  author = {{Leong, Alex S. and Ramaswamy, Arunselvan and Quevedo, Daniel E. and Karl, Holger and Shi, Ling}},
  issn = {{0005-1098}},
  journal = {{Automatica}},
  title = {{{Deep reinforcement learning for wireless sensor scheduling in cyber–physical systems}}},
  doi = {{10.1016/j.automatica.2019.108759}},
  year = {{2019}},
}

@misc{15746,
  author = {{Otte, Oliver}},
  title = {{{Outsourced Decryption of Attribute-based Ciphertexts}}},
  year = {{2019}},
}

@misc{15747,
  author = {{Wördenweber, Nico Christof}},
  title = {{{On the Security of the Rouselakis-Waters Ciphertext-Policy Attribute-Based Encryption Scheme in the Random Oracle Model}}},
  year = {{2019}},
}

@inproceedings{15812,
  abstract = {{Connectionist temporal classification (CTC) is a sequence-level loss that has been successfully applied to train recurrent neural network (RNN) models for automatic speech recognition. However, one major weakness of CTC is the conditional independence assumption that makes it difficult for the model to learn label dependencies. In this paper, we propose stimulated CTC, which uses stimulated learning to help CTC models learn label dependencies implicitly by using an auxiliary RNN to generate the appropriate stimuli. These stimuli come in the form of an additional stimulation loss term which encourages the model to learn said label dependencies. The auxiliary network is only used during training and the inference model has the same structure as a standard CTC model.
The proposed stimulated CTC model achieves about 35% relative character error rate improvements on a synthetic gesture keyboard recognition task and over 30% relative word error rate improvements on the Librispeech automatic speech recognition tasks over a baseline model trained with CTC only.}},
  author = {{Heymann, Jahn and Sim, Khe Chai and Li, Bo}},
  booktitle = {{ICASSP 2019, Brighton, UK}},
  title = {{{Improving CTC Using Stimulated Learning for Sequence Modeling}}},
  year = {{2019}},
}

@inproceedings{15816,
  abstract = {{Despite the strong modeling power of neural network acoustic models, speech enhancement has been shown to deliver additional word error rate improvements if multi-channel data is available. However, there has been a longstanding debate whether enhancement should also be carried out on the ASR training data. In an extensive experimental evaluation on the acoustically very challenging CHiME-5 dinner party data we show that: (i) cleaning up the training data can lead to substantial error rate reductions, and (ii) enhancement in training is advisable as long as enhancement in test is at least as strong as in training. This approach stands in contrast to, and delivers larger gains than, the common strategy reported in the literature to augment the training database with additional artificially degraded speech. Together with an acoustic model topology consisting of initial CNN layers followed by factorized TDNN layers, we achieve a new single-system state-of-the-art result on the CHiME-5 data with 41.6% and 43.2% WER on the DEV and EVAL test sets, respectively. This is an 8% relative improvement compared to the best word error rate published so far for a speech recognizer without system combination.}},
  author = {{Zorila, Catalin and Boeddeker, Christoph and Doddipatla, Rama and Haeb-Umbach, Reinhold}},
  booktitle = {{ASRU 2019, Sentosa, Singapore}},
  title = {{{An Investigation Into the Effectiveness of Enhancement in ASR Training and Test for Chime-5 Dinner Party Transcription}}},
  year = {{2019}},
}

@misc{15819,
  author = {{Leutnant, Matthias}},
  title = {{{Experimentelle Untersuchung des SEM-Algorithmus}}},
  year = {{2019}},
}

@inproceedings{15838,
  abstract = {{In the field of software analysis a trade-off between scalability and accuracy always exists. In this respect, Android app analysis is no exception; in particular, analyzing large or many apps can be challenging. Dealing with many small apps is a typical challenge when facing micro-benchmarks such as DROIDBENCH or ICC-BENCH. These particular benchmarks are not only used for the evaluation of novel tools but also in continuous integration pipelines of existing mature tools to maintain and guarantee a certain quality level. Considering this latter usage, it becomes very important to be able to achieve benchmark results as fast as possible. Hence, benchmarks have to be optimized for this purpose. One approach to do so is app merging. We implemented the Android Merge Tool (AMT) following this approach and show that its novel aspects can be used to produce scaled-up and accurate benchmarks. For such benchmarks Android app analysis tools do not suffer from the scalability-accuracy trade-off anymore. We show this through detailed experiments on DROIDBENCH employing three different analysis tools (AMANDROID, ICCTA, FLOWDROID). Benchmark execution times are largely reduced without losing benchmark accuracy.
Moreover, we argue why AMT is an advantageous successor of the state-of-the-art app merging tool (APKCOMBINER) in analysis lift-up scenarios.}},
  author = {{Pauck, Felix and Zhang, Shikun}},
  booktitle = {{2019 34th IEEE/ACM International Conference on Automated Software Engineering Workshop (ASEW)}},
  isbn = {{9781728141367}},
  keywords = {{Program Analysis, Android App Analysis, Taint Analysis, App Merging, Benchmark}},
  title = {{{Android App Merging for Benchmark Speed-Up and Analysis Lift-Up}}},
  doi = {{10.1109/asew.2019.00019}},
  year = {{2019}},
}

@article{15875,
  author = {{Camberg, Alan Adam and Tröster, Thomas and Bohner, F. and Tölle, J.}},
  issn = {{1757-899X}},
  journal = {{IOP Conference Series: Materials Science and Engineering}},
  pages = {{012057}},
  title = {{{Predicting plasticity and fracture of severe pre-strained EN AW-5182 by Yld2000 yield locus and Hosford-Coulomb fracture model in sheet forming applications}}},
  doi = {{10.1088/1757-899X/651/1/012057}},
  volume = {{651}},
  year = {{2019}},
}