@inproceedings{11716,
  abstract = {{The accuracy of automatic speech recognition systems in noisy and reverberant environments can be improved notably by exploiting the uncertainty of the estimated speech features using so-called uncertainty-of-observation techniques. In this paper, we introduce a new Bayesian decision rule that can serve as a mathematical framework from which both known and new uncertainty-of-observation techniques can be either derived or approximated. The new decision rule in its direct form leads to the new significance decoding approach for Gaussian mixture models, which results in better performance compared to standard uncertainty-of-observation techniques in different additive and convolutive noise scenarios.}},
  author = {{Abdelaziz, Ahmed H. and Zeiler, Steffen and Kolossa, Dorothea and Leutnant, Volker and Haeb-Umbach, Reinhold}},
  booktitle = {{2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
  issn = {{1520-6149}},
  keywords = {{Bayes methods, Gaussian processes, convolution, decision theory, decoding, noise, reverberation, speech coding, speech recognition, Bayesian decision rule, GMM, Gaussian mixture models, additive noise scenarios, automatic speech recognition systems, convolutive noise scenarios, decoding approach, mathematical framework, reverberant environments, significance decoding, speech feature estimation, uncertainty-of-observation techniques, Hidden Markov models, Maximum likelihood decoding, Noise, Speech, Speech recognition, Uncertainty, Uncertainty-of-observation, modified imputation, noise robust speech recognition, uncertainty decoding}},
  pages = {{6827--6831}},
  title = {{{GMM-based significance decoding}}},
  doi = {{10.1109/ICASSP.2013.6638984}},
  year = {{2013}},
}
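A minimal LaTeX sketch of the standard uncertainty-decoding likelihood for a GMM-based model may help place the entry above: it shows the kind of uncertainty-of-observation baseline that the significance decoding rule is compared against. The notation (clean feature x_t, noisy observation y_t, state q, mixture weights w_{q,k}, Gaussian clean-speech posterior with mean and covariance \hat{\mu}_t, \hat{\Sigma}_t) is assumed here for illustration and is not taken from the paper itself.

% Generic uncertainty-of-observation likelihood for a GMM (a sketch, not the
% paper's significance decoding rule): the point likelihood is replaced by its
% expectation under a Gaussian clean-speech posterior.
\begin{align*}
p(y_t \mid q)
  &\approx \int p(x_t \mid q)\, p(x_t \mid y_t)\, \mathrm{d}x_t \\
  &= \sum_{k} w_{q,k} \int \mathcal{N}(x_t;\, \mu_{q,k},\, \Sigma_{q,k})\,
     \mathcal{N}(x_t;\, \hat{\mu}_t,\, \hat{\Sigma}_t)\, \mathrm{d}x_t \\
  &= \sum_{k} w_{q,k}\, \mathcal{N}(\hat{\mu}_t;\, \mu_{q,k},\, \Sigma_{q,k} + \hat{\Sigma}_t).
\end{align*}

For vanishing posterior covariance, \hat{\Sigma}_t \to 0, this falls back to evaluating the GMM at the point estimate \hat{\mu}_t; related techniques such as modified imputation combine the model and posterior statistics differently, and the exact form of the significance decoding rule is derived in the paper.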
@article{11820,
  abstract = {{In this paper, we derive an uncertainty decoding rule for automatic speech recognition (ASR), which accounts for both corrupted observations and inter-frame correlation. The conditional independence assumption, prevalent in hidden Markov model-based ASR, is relaxed to obtain a clean speech posterior that is conditioned on the complete observed feature vector sequence. This is a more informative posterior than one conditioned only on the current observation. The novel decoding is used to obtain a transmission-error robust remote ASR system, where the speech capturing unit is connected to the decoder via an error-prone communication network. We show how the clean speech posterior can be computed for communication links being characterized by either bit errors or packet loss. Recognition results are presented for both distributed and network speech recognition, where in the latter case common voice-over-IP codecs are employed.}},
  author = {{Ion, Valentin and Haeb-Umbach, Reinhold}},
  journal = {{IEEE Transactions on Audio, Speech, and Language Processing}},
  keywords = {{automatic speech recognition, bit errors, codecs, communication links, corrupted observations, decoding, distributed speech recognition, error-prone communication network, feature vector sequence, hidden Markov model-based ASR, hidden Markov models, inter-frame correlation, Internet telephony, network speech recognition, packet loss, speech posterior, speech recognition, transmission error robust speech recognition, uncertainty decoding, voice-over-IP codecs}},
  number = {{5}},
  pages = {{1047--1060}},
  title = {{{A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition}}},
  doi = {{10.1109/TASL.2008.925879}},
  volume = {{16}},
  year = {{2008}},
}

@article{11825,
  abstract = {{In this paper, we propose an enhanced error concealment strategy at the server side of a distributed speech recognition (DSR) system, which is fully compatible with the existing DSR standard. It is based on a Bayesian approach, where the a posteriori probability density of the error-free feature vector is computed, given all received feature vectors which are possibly corrupted by transmission errors. Rather than computing a point estimate, such as the MMSE estimate, and plugging it into the Bayesian decision rule, we employ uncertainty decoding, which results in an integration over the uncertainty in the feature domain. In a typical scenario the communication between the thin client, often a mobile device, and the recognition server spreads across heterogeneous networks. Both bit errors on circuit-switched links and lost data packets on IP connections are mitigated by our approach in a unified manner. The experiments reveal improved robustness both for small- and large-vocabulary recognition tasks.}},
  author = {{Ion, Valentin and Haeb-Umbach, Reinhold}},
  journal = {{Speech Communication}},
  keywords = {{Channel error robustness, Distributed speech recognition, Soft features, Uncertainty decoding}},
  number = {{11}},
  pages = {{1435--1446}},
  title = {{{Uncertainty decoding for distributed speech recognition over error-prone networks}}},
  doi = {{10.1016/j.specom.2006.03.007}},
  volume = {{48}},
  year = {{2006}},
}
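To make the contrast drawn in the two journal abstracts above concrete, plugging an MMSE point estimate into the Bayesian decision rule versus integrating over the feature posterior, the following LaTeX sketch uses generic notation (word sequence W, HMM states q_t, transition probabilities a_{q_{t-1} q_t}, clean features x_t, received and possibly corrupted observations y_{1:T}). It illustrates the general idea under a flat clean-speech prior, with terms independent of W dropped, and is not the exact derivation given in the papers.

% Plug-in decoding: an MMSE point estimate of the clean feature replaces the
% unavailable clean observation in the standard HMM likelihood.
\begin{align*}
\hat{x}_t &= \mathrm{E}\!\left[x_t \mid y_{1:T}\right], &
\hat{W} &= \operatorname*{argmax}_{W}\; P(W)
  \sum_{q_{1:T}} \prod_{t=1}^{T} a_{q_{t-1} q_t}\, p(\hat{x}_t \mid q_t).
\end{align*}
% Uncertainty decoding: the state-conditional likelihood is marginalised over
% the clean-speech posterior; conditioning that posterior on the complete
% sequence y_{1:T}, rather than on y_t alone, is what relaxes the usual
% conditional independence assumption.
\begin{align*}
\hat{W} &= \operatorname*{argmax}_{W}\; P(W)
  \sum_{q_{1:T}} \prod_{t=1}^{T} a_{q_{t-1} q_t}
  \int p(x_t \mid q_t)\, p(x_t \mid y_{1:T})\, \mathrm{d}x_t.
\end{align*}

If the posterior p(x_t \mid y_{1:T}) is modelled as a Gaussian, the integral again reduces to evaluating the acoustic model Gaussians with inflated covariance, so the uncertainty introduced by bit errors or packet loss directly widens the effective observation variance instead of being discarded in a point estimate.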