@inproceedings{11813,
  abstract     = {{The parametric Bayesian Feature Enhancement (BFE) and a data-driven Denoising Autoencoder (DA) both bring performance gains in severe single-channel speech recognition conditions. The first can be adjusted to different conditions by an appropriate parameter setting, while the latter needs to be trained on conditions similar to the ones expected at decoding time, making it vulnerable to a mismatch between training and test conditions. We use a DNN backend and study reverberant ASR under three types of mismatch conditions: different room reverberation times, different speaker to microphone distances and the difference between artificially reverberated data and the recordings in a reverberant environment. We show that for these mismatch conditions BFE can provide the targets for a DA. This unsupervised adaptation provides a performance gain over the direct use of BFE and even enables to compensate for the mismatch of real and simulated reverberant data.}},
  author       = {{Heymann, Jahn and Haeb-Umbach, Reinhold and Golik, Pavel and Schl{\"u}ter, Ralf}},
  booktitle    = {{Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on}},
  keywords     = {{codecs, signal denoising, speech recognition, Bayesian feature enhancement, denoising autoencoder, reverberant ASR, single-channel speech recognition, speaker to microphone distances, unsupervised adaptation, Adaptation models, Noise reduction, Reverberation, Speech, Speech recognition, Training, deep neuronal networks, feature enhancement, robust speech recognition}},
  pages        = {{5053--5057}},
  title        = {Unsupervised Adaptation of a Denoising Autoencoder by {Bayesian} Feature Enhancement for Reverberant {ASR} under Mismatch Conditions},
  doi          = {{10.1109/ICASSP.2015.7178933}},
  year         = {{2015}},
}

@inproceedings{11716,
  abstract     = {{The accuracy of automatic speech recognition systems in noisy and reverberant environments can be improved notably by exploiting the uncertainty of the estimated speech features using so-called uncertainty-of-observation techniques. In this paper, we introduce a new Bayesian decision rule that can serve as a mathematical framework from which both known and new uncertainty-of-observation techniques can be either derived or approximated. The new decision rule in its direct form leads to the new significance decoding approach for Gaussian mixture models, which results in better performance compared to standard uncertainty-of-observation techniques in different additive and convolutive noise scenarios.}},
  author       = {{Abdelaziz, Ahmed H. and Zeiler, Steffen and Kolossa, Dorothea and Leutnant, Volker and Haeb-Umbach, Reinhold}},
  booktitle    = {{Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on}},
  issn         = {{1520-6149}},
  keywords     = {{Bayes methods, Gaussian processes, convolution, decision theory, decoding, noise, reverberation, speech coding, speech recognition, Bayesian decision rule, GMM, Gaussian mixture models, additive noise scenarios, automatic speech recognition systems, convolutive noise scenarios, decoding approach, mathematical framework, reverberant environments, significance decoding, speech feature estimation, uncertainty-of-observation techniques, Hidden Markov models, Maximum likelihood decoding, Noise, Speech, Speech recognition, Uncertainty, Uncertainty-of-observation, modified imputation, noise robust speech recognition, uncertainty decoding}},
  pages        = {{6827--6831}},
  title        = {{GMM}-Based Significance Decoding},
  doi          = {{10.1109/ICASSP.2013.6638984}},
  year         = {{2013}},
}

@article{11820,
  abstract     = {{In this paper, we derive an uncertainty decoding rule for automatic speech recognition (ASR), which accounts for both corrupted observations and inter-frame correlation. The conditional independence assumption, prevalent in hidden Markov model-based ASR, is relaxed to obtain a clean speech posterior that is conditioned on the complete observed feature vector sequence. This is a more informative posterior than one conditioned only on the current observation. The novel decoding is used to obtain a transmission-error robust remote ASR system, where the speech capturing unit is connected to the decoder via an error-prone communication network. We show how the clean speech posterior can be computed for communication links being characterized by either bit errors or packet loss. Recognition results are presented for both distributed and network speech recognition, where in the latter case common voice-over-IP codecs are employed.}},
  author       = {{Ion, Valentin and Haeb-Umbach, Reinhold}},
  journal      = {{IEEE Transactions on Audio, Speech, and Language Processing}},
  keywords     = {{automatic speech recognition, bit errors, codecs, communication links, corrupted observations, decoding, distributed speech recognition, error-prone communication network, feature vector sequence, hidden Markov model-based ASR, hidden Markov models, inter-frame correlation, Internet telephony, network speech recognition, packet loss, speech posterior, speech recognition, transmission error robust speech recognition, uncertainty decoding, voice-over-IP codecs}},
  number       = {{5}},
  pages        = {{1047--1060}},
  title        = {A Novel Uncertainty Decoding Rule with Applications to Transmission Error Robust Speech Recognition},
  doi          = {{10.1109/TASL.2008.925879}},
  volume       = {{16}},
  year         = {{2008}},
}

