@article{10788,
  author       = {{Trier, Matthias}},
  issn         = {{2198-2775}},
  journal      = {{HMD Praxis der Wirtschaftsinformatik}},
  number       = {{4}},
  pages        = {{714--724}},
  publisher    = {{Springer}},
  title        = {{{Wissensarbeit mit Social Media Plattformen – Unsicherheitsfaktoren als Managementansatz}}},
  doi          = {{10.1365/s40702-018-0427-5}},
  volume       = {{55}},
  year         = {{2018}},
}

@inproceedings{1096,
  abstract     = {{to appear}},
  author       = {{Beyer, Dirk and Jakobs, Marie-Christine and Lemberger, Thomas and Wehrheim, Heike}},
  booktitle    = {{Proceedings of the 40th International Conference on Software Engineering (ICSE)}},
  location     = {{Gothenburg, Sweden}},
  pages        = {{1182--1193}},
  publisher    = {{ACM}},
  title        = {{{Reducer-Based Construction of Conditional Verifiers}}},
  year         = {{2018}},
}

@misc{1097,
  author       = {{Jentzsch, Felix Paul}},
  keywords     = {{Approximate Computing, Proof-Carrying Hardware, Formal Verification}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Enforcing IP Core Connection Properties with Verifiable Security Monitors}}},
  year         = {{2018}},
}

@incollection{1104,
  abstract     = {{Präzision ist kein Zufall. Sie wird vom Menschen herbeigeführt, indem Übereinstimmung mit einem Standard oder einem akzeptierten Wert angestrebt wird oder die Reproduzierbarkeit von Experimenten möglichst hoch sein muss. Was aber tun, wenn Präzision mangels verfügbarer Informationen nicht hergestellt werden kann? Wie gehen Wissenschaft und Kunst dann mit dieser fehlenden Eindeutigkeit um? Die Autorinnen und Autoren dieses Sammelbandes beleuchten aus der Perspektive ihrer jeweiligen Fachdisziplin die Chancen bei der Berücksichtigung von Unschärfe(n) in ihrer Forschung und Kunst. Denn Unschärfe ist Realität. }},
  author       = {{Geierhos, Michaela}},
  booktitle    = {{Unschärfe - Der Umgang mit fehlender Eindeutigkeit}},
  editor       = {{Freitag, Steffen and Geierhos, Michaela and Asmani, Rozbeh and Haug, Judith I.}},
  isbn         = {{978-3-506-78896-2}},
  pages        = {{111--128}},
  publisher    = {{Ferdinand Schöningh}},
  title        = {{{Unschärfe bei der Interpretation natürlichsprachlicher Anforderungsbeschreibungen}}},
  year         = {{2018}},
}

@inproceedings{1171,
  author       = {{Müller, Michelle and Gutt, Dominik and Neumann, Jürgen}},
  booktitle    = {{Multikonferenz Wirtschaftsinformatik (MKWI) 2018}},
  location     = {{Lüneburg}},
  title        = {{{Beschreib mir deine Wohnung und ich sag dir wer du bist - Eine explorative Analyse von Gastgeberpersönlichkeiten auf Airbnb}}},
  year         = {{2018}},
}

@inproceedings{11711,
  author       = {{Ajjour, Yamen and Wachsmuth, Henning and Kiesel, Dora and Riehmann, Patrick and Fan, Fan and Castiglia, Giuliano and Adejoh, Rosemary and Fröhlich, Bernd and Stein, Benno}},
  booktitle    = {{Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations}},
  pages        = {{60--65}},
  title        = {{{Visualization of the Topic Space of Argument Search Results in args.me}}},
  year         = {{2018}},
}

@inproceedings{11712,
  author       = {{El Baff, Roxanne and Wachsmuth, Henning and Al Khatib, Khalid and Stein, Benno}},
  booktitle    = {{Proceedings of the 22nd Conference on Computational Natural Language Learning}},
  pages        = {{454--464}},
  publisher    = {{Association for Computational Linguistics}},
  title        = {{{Challenge or Empower: Revisiting Argumentation Quality in a News Editorial Corpus}}},
  year         = {{2018}},
}

@article{1173,
  author       = {{Kaimann, Daniel and Stroh-Maraun, Nadja and Cox, Joe}},
  journal      = {{Journal of Consumer Behaviour}},
  number       = {{3}},
  pages        = {{290--301}},
  publisher    = {{Wiley Online Library}},
  title        = {{{A Duration Model Analysis of Consumer Preferences and Determinants of Video Game Consumption}}},
  doi          = {{10.1002/cb.1711}},
  volume       = {{17}},
  year         = {{2018}},
}

@inproceedings{11760,
  abstract     = {{Acoustic event detection, i.e., the task of assigning a human interpretable label to a segment of audio, has only recently attracted increased interest in the research community. Driven by the DCASE challenges and the availability of large-scale audio datasets, the state-of-the-art has progressed rapidly with deep-learning-based classi- fiers dominating the field. Because several potential use cases favor a realization on distributed sensor nodes, e.g. ambient assisted living applications, habitat monitoring or surveillance, we are concerned with two issues here. Firstly the classification performance of such systems and secondly the computing resources required to achieve a certain performance considering node level feature extraction. In this contribution we look at the balance between the two criteria by employing traditional techniques and different deep learning architectures, including convolutional and recurrent models in the context of real life everyday audio recordings in realistic, however challenging, multisource conditions.}},
  author       = {{Ebbers, Janek and Nelus, Alexandru and Martin, Rainer and Haeb-Umbach, Reinhold}},
  booktitle    = {{DAGA 2018, München}},
  title        = {{{Evaluation of Modulation-MFCC Features and DNN Classification for Acoustic Event Detection}}},
  year         = {{2018}},
}

@inproceedings{1181,
  abstract     = {{The main idea in On-The-Fly Computing is to automatically compose existing software services according to the wishes of end-users. However, since user requirements are often ambiguous, vague and incomplete, the selection and composition of suitable software services is a challenging task. In this paper, we present our current approach to improve requirement descriptions before they are used for software composition. This procedure is fully automated, but also has limitations, for example, if necessary information is missing. In addition, and in response to the limitations, we provide insights into our above-mentioned current work that combines the existing optimization approach with a
chatbot solution.}},
  author       = {{Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  issn         = {{1613-0073}},
  location     = {{Utrecht, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{How to Deal with Inaccurate Service Requirements? Insights in Our Current Approach and New Ideas}}},
  volume       = {{2075}},
  year         = {{2018}},
}

@inproceedings{1182,
  abstract     = {{Natural language requirement descriptions are often unstructured, contradictory and incomplete and are therefore challenging for automatic processing. Although many of these deficits can be compensated by means of Natural Language Processing, there still remain cases where interaction with end-users is necessary for clarification. In this paper, we present our idea of using chatbot technology to establish end-user communication in order to support the automatic compensation of some deficits in natural language requirement descriptions.}},
  author       = {{Friesen, Edwin and Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  issn         = {{1613-0073}},
  location     = {{Utrecht, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{CORDULA: Software Requirements Extraction Utilizing Chatbot as Communication Interface}}},
  volume       = {{2075}},
  year         = {{2018}},
}

@inproceedings{1183,
  abstract     = {{As our world grows in complexity, companies and employees alike need, more than ever before, solutions tailored to their exact needs. Since such tools cannot always be purchased off-the-shelf and need to be designed from the ground up, developers rely on software requirements. In this paper, we present our vision of a syntactic rule-based extraction
tool for software requirements specification documents. In contrast to other methods, our tool will allow stakeholders to express their needs and wishes in unfiltered natural language, which we believe is essential for non-expert users.}},
  author       = {{Caron, Matthew and Bäumer, Frederik Simon and Geierhos, Michaela}},
  booktitle    = {{Joint Proceedings of REFSQ-2018 Workshops, Doctoral Symposium, Live Studies Track, and Poster Track co-located with the 23rd International Conference on Requirements Engineering: Foundation for Software Quality (REFSQ 2018)}},
  editor       = {{Schmid, Klaus and Spoletini, Paola and Ben Charrada, Eya and Chisik, Yoram and Dalpiaz, Fabiano and Ferrari, Alessio and Forbrig, Peter and Franch, Xavier and Kirikova, Marite and Madhavji, Nazim and Palomares, Cristina and Ralyté, Jolita and Sabetzadeh, Mehrdad and Sawyer, Pete and van der Linden, Dirk and Zamansky, Anna}},
  issn         = {{1613-0073}},
  location     = {{Utrecht, The Netherlands}},
  publisher    = {{CEUR-WS.org}},
  title        = {{{Back to Basics: Extracting Software Requirements with a Syntactic Approach}}},
  volume       = {{2075}},
  year         = {{2018}},
}

@inproceedings{11835,
  abstract     = {{Signal dereverberation using the weighted prediction error (WPE) method has been proven to be an effective means to raise the accuracy of far-field speech recognition. But in its original formulation, WPE requires multiple iterations over a sufficiently long utterance, rendering it unsuitable for online low-latency applications. Recently, two methods have been proposed to overcome this limitation. One utilizes a neural network to estimate the power spectral density (PSD) of the target signal and works in a block-online fashion. The other method relies on a rather simple PSD estimation which smoothes the observed PSD and utilizes a recursive formulation which enables it to work on a frame-by-frame basis. In this paper, we integrate a deep neural network (DNN) based estimator into the recursive frame-online formulation. We evaluate the performance of the recursive system with different PSD estimators in comparison to the block-online and offline variant on two distinct corpora. The REVERB challenge data, where the signal is mainly deteriorated by reverberation, and a database which combines WSJ and VoiceHome to also consider (directed) noise sources. The results show that although smoothing works surprisingly well, the more sophisticated DNN based estimator shows promising improvements and shortens the performance gap between online and offline processing.}},
  author       = {{Heymann, Jahn and Drude, Lukas and Haeb-Umbach, Reinhold and Kinoshita, Keisuke and Nakatani, Tomohiro}},
  booktitle    = {{IWAENC 2018, Tokio, Japan}},
  title        = {{{Frame-Online DNN-WPE Dereverberation}}},
  year         = {{2018}},
}

@inproceedings{11837,
  abstract     = {{We present a block-online multi-channel front end for automatic speech recognition in noisy and reverberated environments. It is an online version of our earlier proposed neural network supported acoustic beamformer, whose coefficients are calculated from noise and speech spatial covariance matrices which are estimated utilizing a neural mask estimator. However, the sparsity of speech in the STFT domain causes problems for the initial beamformer coefficients estimation in some frequency bins due to lack of speech observations. We propose two methods to mitigate this issue. The first is to lower the frequency resolution of the STFT, which comes with the additional advantage of a reduced time window, thus lowering the latency introduced by block processing. The second approach is to smooth beamforming coefficients along the frequency axis, thus exploiting their high interfrequency correlation. With both approaches the gap between offline and block-online beamformer performance, as measured by the word error rate achieved by a downstream speech recognizer, is significantly reduced. Experiments are carried out on two copora, representing noisy (CHiME-4) and noisy reverberant (voiceHome) environments.}},
  author       = {{Heitkaemper, Jens and Heymann, Jahn and Haeb-Umbach, Reinhold}},
  booktitle    = {{ITG 2018, Oldenburg, Germany}},
  title        = {{{Smoothing along Frequency in Online Neural Network Supported Acoustic Beamforming}}},
  year         = {{2018}},
}

@misc{1186,
  author       = {{Kemper, Arne}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Pure Nash Equilibria in Robust Congestion Games via Potential Functions}}},
  year         = {{2018}},
}

@misc{1187,
  author       = {{Nachtigall, Marcel}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Scenario-driven Strategy Analysis in a n-player Composition Game Model}}},
  year         = {{2018}},
}

@inproceedings{11872,
  abstract     = {{The weighted prediction error (WPE) algorithm has proven to be a very successful dereverberation method for the REVERB challenge. Likewise, neural network based mask estimation for beamforming demonstrated very good noise suppression in the CHiME 3 and CHiME 4 challenges. Recently, it has been shown that this estimator can also be trained to perform dereverberation and denoising jointly. However, up to now a comparison of a neural beamformer and WPE is still missing, so is an investigation into a combination of the two. Therefore, we here provide an extensive evaluation of both and consequently propose variants to integrate deep neural network based beamforming with WPE. For these integrated variants we identify a consistent word error rate (WER) reduction on two distinct databases. In particular, our study shows that deep learning based beamforming benefits from a model-based dereverberation technique (i.e. WPE) and vice versa. Our key findings are: (a) Neural beamforming yields the lower WERs in comparison to WPE the more channels and noise are present. (b) Integration of WPE and a neural beamformer consistently outperforms all stand-alone systems.}},
  author       = {{Drude, Lukas and Boeddeker, Christoph and Heymann, Jahn and Kinoshita, Keisuke and Delcroix, Marc and Nakatani, Tomohiro and Haeb-Umbach, Reinhold}},
  booktitle    = {{INTERSPEECH 2018, Hyderabad, India}},
  title        = {{{Integrating neural network based beamforming and weighted prediction error dereverberation}}},
  year         = {{2018}},
}

@inproceedings{11873,
  abstract     = {{NARA-WPE is a Python software package providing implementations of the weighted prediction error (WPE) dereverberation algorithm. WPE has been shown to be a highly effective tool for speech dereverberation, thus improving the perceptual quality of the signal and improving the recognition performance of downstream automatic speech recognition (ASR). It is suitable both for single-channel and multi-channel applications. The package consist of (1) a Numpy implementation which can easily be integrated into a custom Python toolchain, and (2) a TensorFlow implementation which allows integration into larger computational graphs and enables backpropagation through WPE to train more advanced front-ends. This package comprises of an iterative offline (batch) version, a block-online version, and a frame-online version which can be used in moderately low latency applications, e.g. digital speech assistants.}},
  author       = {{Drude, Lukas and Heymann, Jahn and Boeddeker, Christoph and Haeb-Umbach, Reinhold}},
  booktitle    = {{ITG 2018, Oldenburg, Germany}},
  title        = {{{NARA-WPE: A Python package for weighted prediction error dereverberation in Numpy and Tensorflow for online and offline processing}}},
  year         = {{2018}},
}

@misc{1188,
  author       = {{Kempf, Jérôme}},
  publisher    = {{Universität Paderborn}},
  title        = {{{Learning deterministic bandit behaviour from compositions}}},
  year         = {{2018}},
}

@article{11916,
  abstract     = {{We present an experimental comparison of seven state-of-the-art machine learning algorithms for the task of semantic analysis of spoken input, with a special emphasis on applications for dysarthric speech. Dysarthria is a motor speech disorder, which is characterized by poor articulation of phonemes. In order to cater for these noncanonical phoneme realizations, we employed an unsupervised learning approach to estimate the acoustic models for speech recognition, which does not require a literal transcription of the training data. Even for the subsequent task of semantic analysis, only weak supervision is employed, whereby the training utterance is accompanied by a semantic label only, rather than a literal transcription. Results on two databases, one of them containing dysarthric speech, are presented showing that Markov logic networks and conditional random fields substantially outperform other machine learning approaches. Markov logic networks have proved to be especially robust to recognition errors, which are caused by imprecise articulation in dysarthric speech.}},
  author       = {{Despotovic, Vladimir and Walter, Oliver and Haeb-Umbach, Reinhold}},
  journal      = {{Speech Communication}},
  pages        = {{242--251}},
  publisher    = {{Elsevier B.V.}},
  volume       = {{99}},
  title        = {{{Machine learning techniques for semantic analysis of dysarthric speech: An experimental study}}},
  year         = {{2018}},
}

