[{"page":"727-738","volume":115,"issue":"4","publication":"European Journal of Applied Physiology","type":"journal_article","keyword":["Physiology (medical)","Public Health","Environmental and Occupational Health","Orthopedics and Sports Medicine","General Medicine","Public Health","Environmental and Occupational Health","Physiology"],"user_id":"89838","title":"Musculoskeletal effects of 5 days of bed rest with and without locomotion replacement training","extern":"1","doi":"10.1007/s00421-014-3045-0","_id":"33398","date_updated":"2022-09-15T09:55:02Z","date_created":"2022-09-15T09:36:45Z","publisher":"Springer Science and Business Media LLC","language":[{"iso":"eng"}],"publication_identifier":{"issn":["1439-6319","1439-6327"]},"year":"2014","status":"public","citation":{"apa":"Mulder, E., Clément, G., Linnarsson, D., Paloski, W. H., Wuyts, F. P., Zange, J., Frings-Meuthen, P., Johannes, B., Shushakov, V., Grunewald, M., Maassen, N., Bühlmeier, J., &#38; Rittweger, J. (2014). Musculoskeletal effects of 5 days of bed rest with and without locomotion replacement training. <i>European Journal of Applied Physiology</i>, <i>115</i>(4), 727–738. <a href=\"https://doi.org/10.1007/s00421-014-3045-0\">https://doi.org/10.1007/s00421-014-3045-0</a>","ama":"Mulder E, Clément G, Linnarsson D, et al. Musculoskeletal effects of 5 days of bed rest with and without locomotion replacement training. <i>European Journal of Applied Physiology</i>. 2014;115(4):727-738. doi:<a href=\"https://doi.org/10.1007/s00421-014-3045-0\">10.1007/s00421-014-3045-0</a>","chicago":"Mulder, E., G. Clément, D. Linnarsson, W. H. Paloski, F. P. Wuyts, J. Zange, P. Frings-Meuthen, et al. “Musculoskeletal Effects of 5 Days of Bed Rest with and without Locomotion Replacement Training.” <i>European Journal of Applied Physiology</i> 115, no. 4 (2014): 727–38. <a href=\"https://doi.org/10.1007/s00421-014-3045-0\">https://doi.org/10.1007/s00421-014-3045-0</a>.","ieee":"E. 
Mulder <i>et al.</i>, “Musculoskeletal effects of 5 days of bed rest with and without locomotion replacement training,” <i>European Journal of Applied Physiology</i>, vol. 115, no. 4, pp. 727–738, 2014, doi: <a href=\"https://doi.org/10.1007/s00421-014-3045-0\">10.1007/s00421-014-3045-0</a>.","mla":"Mulder, E., et al. “Musculoskeletal Effects of 5 Days of Bed Rest with and without Locomotion Replacement Training.” <i>European Journal of Applied Physiology</i>, vol. 115, no. 4, Springer Science and Business Media LLC, 2014, pp. 727–38, doi:<a href=\"https://doi.org/10.1007/s00421-014-3045-0\">10.1007/s00421-014-3045-0</a>.","bibtex":"@article{Mulder_Clément_Linnarsson_Paloski_Wuyts_Zange_Frings-Meuthen_Johannes_Shushakov_Grunewald_et al._2014, title={Musculoskeletal effects of 5 days of bed rest with and without locomotion replacement training}, volume={115}, DOI={<a href=\"https://doi.org/10.1007/s00421-014-3045-0\">10.1007/s00421-014-3045-0</a>}, number={4}, journal={European Journal of Applied Physiology}, publisher={Springer Science and Business Media LLC}, author={Mulder, E. and Clément, G. and Linnarsson, D. and Paloski, W. H. and Wuyts, F. P. and Zange, J. and Frings-Meuthen, P. and Johannes, B. and Shushakov, V. and Grunewald, M. and et al.}, year={2014}, pages={727–738} }","short":"E. Mulder, G. Clément, D. Linnarsson, W.H. Paloski, F.P. Wuyts, J. Zange, P. Frings-Meuthen, B. Johannes, V. Shushakov, M. Grunewald, N. Maassen, J. Bühlmeier, J. Rittweger, European Journal of Applied Physiology 115 (2014) 727–738."},"publication_status":"published","author":[{"last_name":"Mulder","full_name":"Mulder, E.","first_name":"E."},{"first_name":"G.","full_name":"Clément, G.","last_name":"Clément"},{"full_name":"Linnarsson, D.","first_name":"D.","last_name":"Linnarsson"},{"first_name":"W. H.","full_name":"Paloski, W. H.","last_name":"Paloski"},{"full_name":"Wuyts, F. P.","first_name":"F. 
P.","last_name":"Wuyts"},{"last_name":"Zange","full_name":"Zange, J.","first_name":"J."},{"last_name":"Frings-Meuthen","first_name":"P.","full_name":"Frings-Meuthen, P."},{"last_name":"Johannes","first_name":"B.","full_name":"Johannes, B."},{"first_name":"V.","full_name":"Shushakov, V.","last_name":"Shushakov"},{"first_name":"M.","full_name":"Grunewald, M.","last_name":"Grunewald"},{"last_name":"Maassen","first_name":"N.","full_name":"Maassen, N."},{"full_name":"Bühlmeier, Judith","first_name":"Judith","id":"89838","last_name":"Bühlmeier"},{"first_name":"J.","full_name":"Rittweger, J.","last_name":"Rittweger"}],"intvolume":"       115"},{"date_updated":"2022-01-06T06:51:07Z","page":"6827-6831","_id":"11716","status":"public","language":[{"iso":"eng"}],"type":"conference","publication_identifier":{"issn":["1520-6149"]},"year":"2013","date_created":"2019-07-12T05:26:53Z","publication":"Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on","department":[{"_id":"54"}],"keyword":["Bayes methods","Gaussian processes","convolution","decision theory","decoding","noise","reverberation","speech coding","speech recognition","Bayesian decision rule","GMM","Gaussian mixture models","additive noise scenarios","automatic speech recognition systems","convolutive noise scenarios","decoding approach","mathematical framework","reverberant environments","significance decoding","speech feature estimation","uncertainty-of-observation techniques","Hidden Markov models","Maximum likelihood decoding","Noise","Speech","Speech recognition","Uncertainty","Uncertainty-of-observation","modified imputation","noise robust speech recognition","significance decoding","uncertainty decoding"],"user_id":"44006","citation":{"ama":"Abdelaziz AH, Zeiler S, Kolossa D, Leutnant V, Haeb-Umbach R. GMM-based significance decoding. In: <i>Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference On</i>. ; 2013:6827-6831. 
doi:<a href=\"https://doi.org/10.1109/ICASSP.2013.6638984\">10.1109/ICASSP.2013.6638984</a>","apa":"Abdelaziz, A. H., Zeiler, S., Kolossa, D., Leutnant, V., &#38; Haeb-Umbach, R. (2013). GMM-based significance decoding. In <i>Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on</i> (pp. 6827–6831). <a href=\"https://doi.org/10.1109/ICASSP.2013.6638984\">https://doi.org/10.1109/ICASSP.2013.6638984</a>","ieee":"A. H. Abdelaziz, S. Zeiler, D. Kolossa, V. Leutnant, and R. Haeb-Umbach, “GMM-based significance decoding,” in <i>Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on</i>, 2013, pp. 6827–6831.","chicago":"Abdelaziz, Ahmed H., Steffen Zeiler, Dorothea Kolossa, Volker Leutnant, and Reinhold Haeb-Umbach. “GMM-Based Significance Decoding.” In <i>Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference On</i>, 6827–31, 2013. <a href=\"https://doi.org/10.1109/ICASSP.2013.6638984\">https://doi.org/10.1109/ICASSP.2013.6638984</a>.","bibtex":"@inproceedings{Abdelaziz_Zeiler_Kolossa_Leutnant_Haeb-Umbach_2013, title={GMM-based significance decoding}, DOI={<a href=\"https://doi.org/10.1109/ICASSP.2013.6638984\">10.1109/ICASSP.2013.6638984</a>}, booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on}, author={Abdelaziz, Ahmed H. and Zeiler, Steffen and Kolossa, Dorothea and Leutnant, Volker and Haeb-Umbach, Reinhold}, year={2013}, pages={6827–6831} }","mla":"Abdelaziz, Ahmed H., et al. “GMM-Based Significance Decoding.” <i>Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference On</i>, 2013, pp. 6827–31, doi:<a href=\"https://doi.org/10.1109/ICASSP.2013.6638984\">10.1109/ICASSP.2013.6638984</a>.","short":"A.H. Abdelaziz, S. Zeiler, D. Kolossa, V. Leutnant, R. Haeb-Umbach, in: Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference On, 2013, pp. 
6827–6831."},"doi":"10.1109/ICASSP.2013.6638984","abstract":[{"text":"The accuracy of automatic speech recognition systems in noisy and reverberant environments can be improved notably by exploiting the uncertainty of the estimated speech features using so-called uncertainty-of-observation techniques. In this paper, we introduce a new Bayesian decision rule that can serve as a mathematical framework from which both known and new uncertainty-of-observation techniques can be either derived or approximated. The new decision rule in its direct form leads to the new significance decoding approach for Gaussian mixture models, which results in better performance compared to standard uncertainty-of-observation techniques in different additive and convolutive noise scenarios.","lang":"eng"}],"author":[{"first_name":"Ahmed H.","full_name":"Abdelaziz, Ahmed H.","last_name":"Abdelaziz"},{"full_name":"Zeiler, Steffen","first_name":"Steffen","last_name":"Zeiler"},{"last_name":"Kolossa","first_name":"Dorothea","full_name":"Kolossa, Dorothea"},{"last_name":"Leutnant","full_name":"Leutnant, Volker","first_name":"Volker"},{"id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold","full_name":"Haeb-Umbach, Reinhold"}],"title":"GMM-based significance decoding"},{"_id":"11841","page":" 22-23 ","date_updated":"2022-01-06T06:51:11Z","date_created":"2019-07-12T05:29:17Z","publication":" IEEE Workshop on Applications of Signal Processing to Audio and Acoustics ","status":"public","language":[{"iso":"eng"}],"type":"conference","year":"2013","main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2013/Reverb2013.pdf"}],"keyword":["Reverberant speech","dereverberation","ASR","evaluation","challenge"],"oa":"1","user_id":"44006","citation":{"short":"K. Kinoshita, M. Delcroix, T. Yoshioka, T. Nakatani, E. Habets, R. Haeb-Umbach, V. Leutnant, A. Sehr, W. Kellermann, R. Maas, S. Gannot, B. 
Raj, in:  IEEE Workshop on Applications of Signal Processing to Audio and Acoustics , 2013, pp. 22–23.","bibtex":"@inproceedings{Kinoshita_Delcroix_Yoshioka_Nakatani_Habets_Haeb-Umbach_Leutnant_Sehr_Kellermann_Maas_et al._2013, title={The reverb challenge: a common evaluation framework for dereverberation and recognition of reverberant speech}, booktitle={ IEEE Workshop on Applications of Signal Processing to Audio and Acoustics }, author={Kinoshita, Keisuke and Delcroix, Marc and Yoshioka, Takuya and Nakatani, Tomohiro and Habets, Emanuel and Haeb-Umbach, Reinhold and Leutnant, Volker and Sehr, Armin and Kellermann, Walter and Maas, Roland and et al.}, year={2013}, pages={22–23} }","mla":"Kinoshita, Keisuke, et al. “The Reverb Challenge: A Common Evaluation Framework for Dereverberation and Recognition of Reverberant Speech.” <i> IEEE Workshop on Applications of Signal Processing to Audio and Acoustics </i>, 2013, pp. 22–23.","ieee":"K. Kinoshita <i>et al.</i>, “The reverb challenge: a common evaluation framework for dereverberation and recognition of reverberant speech,” in <i> IEEE Workshop on Applications of Signal Processing to Audio and Acoustics </i>, 2013, pp. 22–23.","chicago":"Kinoshita, Keisuke, Marc Delcroix, Takuya Yoshioka, Tomohiro Nakatani, Emanuel Habets, Reinhold Haeb-Umbach, Volker Leutnant, et al. “The Reverb Challenge: A Common Evaluation Framework for Dereverberation and Recognition of Reverberant Speech.” In <i> IEEE Workshop on Applications of Signal Processing to Audio and Acoustics </i>, 22–23, 2013.","ama":"Kinoshita K, Delcroix M, Yoshioka T, et al. The reverb challenge: a common evaluation framework for dereverberation and recognition of reverberant speech. In: <i> IEEE Workshop on Applications of Signal Processing to Audio and Acoustics </i>. ; 2013:22-23.","apa":"Kinoshita, K., Delcroix, M., Yoshioka, T., Nakatani, T., Habets, E., Haeb-Umbach, R., … Raj, B. (2013). 
The reverb challenge: a common evaluation framework for dereverberation and recognition of reverberant speech. In <i> IEEE Workshop on Applications of Signal Processing to Audio and Acoustics </i> (pp. 22–23)."},"department":[{"_id":"54"}],"author":[{"last_name":"Kinoshita","first_name":"Keisuke","full_name":"Kinoshita, Keisuke"},{"first_name":"Marc","full_name":"Delcroix, Marc","last_name":"Delcroix"},{"full_name":"Yoshioka, Takuya","first_name":"Takuya","last_name":"Yoshioka"},{"first_name":"Tomohiro","full_name":"Nakatani, Tomohiro","last_name":"Nakatani"},{"full_name":"Habets, Emanuel","first_name":"Emanuel","last_name":"Habets"},{"first_name":"Reinhold","full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach"},{"first_name":"Volker","full_name":"Leutnant, Volker","last_name":"Leutnant"},{"full_name":"Sehr, Armin","first_name":"Armin","last_name":"Sehr"},{"last_name":"Kellermann","first_name":"Walter","full_name":"Kellermann, Walter"},{"last_name":"Maas","first_name":"Roland","full_name":"Maas, Roland"},{"first_name":"Sharon","full_name":"Gannot, Sharon","last_name":"Gannot"},{"first_name":"Bhiksha","full_name":"Raj, Bhiksha","last_name":"Raj"}],"title":"The reverb challenge: a common evaluation framework for dereverberation and recognition of reverberant speech","abstract":[{"text":"Recently, substantial progress has been made in the field of reverberant speech signal processing, including both single- and multichannel de-reverberation techniques, and automatic speech recognition (ASR) techniques robust to reverberation. To evaluate state-of-the-art algorithms and obtain new insights regarding potential future research directions, we propose a common evaluation framework including datasets, tasks, and evaluation metrics for both speech enhancement and ASR techniques. The proposed framework will be used as a common basis for the REVERB (REverberant Voice Enhancement and Recognition Benchmark) challenge. 
This paper describes the rationale behind the challenge, and provides a detailed description of the evaluation framework and benchmark results.","lang":"eng"}]},{"citation":{"apa":"Leutnant, V., Krueger, A., &#38; Haeb-Umbach, R. (2013). Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>21</i>(8), 1640–1652. <a href=\"https://doi.org/10.1109/TASL.2013.2258013\">https://doi.org/10.1109/TASL.2013.2258013</a>","ama":"Leutnant V, Krueger A, Haeb-Umbach R. Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2013;21(8):1640-1652. doi:<a href=\"https://doi.org/10.1109/TASL.2013.2258013\">10.1109/TASL.2013.2258013</a>","ieee":"V. Leutnant, A. Krueger, and R. Haeb-Umbach, “Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 21, no. 8, pp. 1640–1652, 2013.","chicago":"Leutnant, Volker, Alexander Krueger, and Reinhold Haeb-Umbach. “Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 21, no. 8 (2013): 1640–52. <a href=\"https://doi.org/10.1109/TASL.2013.2258013\">https://doi.org/10.1109/TASL.2013.2258013</a>.","bibtex":"@article{Leutnant_Krueger_Haeb-Umbach_2013, title={Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition}, volume={21}, DOI={<a href=\"https://doi.org/10.1109/TASL.2013.2258013\">10.1109/TASL.2013.2258013</a>}, number={8}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Leutnant, Volker and Krueger, Alexander and Haeb-Umbach, Reinhold}, year={2013}, pages={1640–1652} }","mla":"Leutnant, Volker, et al. 
“Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 21, no. 8, 2013, pp. 1640–52, doi:<a href=\"https://doi.org/10.1109/TASL.2013.2258013\">10.1109/TASL.2013.2258013</a>.","short":"V. Leutnant, A. Krueger, R. Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 21 (2013) 1640–1652."},"user_id":"44006","keyword":["Bayes methods","compensation","error statistics","reverberation","speech recognition","Bayesian feature enhancement","background noise","clean speech feature vectors","compensation","connected digits recognition task","error statistics","memory requirements","noisy reverberant data","posteriori probability density function","recursive formulation","reverberant logarithmic mel power spectral coefficients","robust automatic speech recognition","signal-to-noise ratios","time-variant observation","word error rate reduction","Robust automatic speech recognition","model-based Bayesian feature enhancement","observation model for reverberant and noisy speech","recursive observation model"],"department":[{"_id":"54"}],"title":"Bayesian Feature Enhancement for Reverberation and Noise Robust Speech Recognition","author":[{"last_name":"Leutnant","first_name":"Volker","full_name":"Leutnant, Volker"},{"first_name":"Alexander","full_name":"Krueger, Alexander","last_name":"Krueger"},{"first_name":"Reinhold","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","id":"242"}],"intvolume":"        21","doi":"10.1109/TASL.2013.2258013","abstract":[{"text":"In this contribution we extend a previously proposed Bayesian approach for the enhancement of reverberant logarithmic mel power spectral coefficients for robust automatic speech recognition to the additional compensation of background noise. 
A recently proposed observation model is employed whose time-variant observation error statistics are obtained as a side product of the inference of the a posteriori probability density function of the clean speech feature vectors. Further a reduction of the computational effort and the memory requirements are achieved by using a recursive formulation of the observation model. The performance of the proposed algorithms is first experimentally studied on a connected digits recognition task with artificially created noisy reverberant data. It is shown that the use of the time-variant observation error model leads to a significant error rate reduction at low signal-to-noise ratios compared to a time-invariant model. Further experiments were conducted on a 5000 word task recorded in a reverberant and noisy environment. A significant word error rate reduction was obtained demonstrating the effectiveness of the approach on real-world data.","lang":"eng"}],"_id":"11862","page":"1640-1652","volume":21,"date_updated":"2022-01-06T06:51:11Z","issue":"8","publication":"IEEE Transactions on Audio, Speech, and Language Processing","date_created":"2019-07-12T05:29:42Z","year":"2013","type":"journal_article","language":[{"iso":"eng"}],"status":"public"},{"department":[{"_id":"54"}],"citation":{"mla":"Vu, Dang Hai Tran, and Reinhold Haeb-Umbach. “Using the Turbo Principle for Exploiting Temporal and Spectral Correlations in Speech Presence Probability Estimation.” <i>38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)</i>, 2013, pp. 
863–67, doi:<a href=\"https://doi.org/10.1109/ICASSP.2013.6637771\">10.1109/ICASSP.2013.6637771</a>.","bibtex":"@inproceedings{Vu_Haeb-Umbach_2013, title={Using the turbo principle for exploiting temporal and spectral correlations in speech presence probability estimation}, DOI={<a href=\"https://doi.org/10.1109/ICASSP.2013.6637771\">10.1109/ICASSP.2013.6637771</a>}, booktitle={38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)}, author={Vu, Dang Hai Tran and Haeb-Umbach, Reinhold}, year={2013}, pages={863–867} }","short":"D.H.T. Vu, R. Haeb-Umbach, in: 38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013), 2013, pp. 863–867.","ama":"Vu DHT, Haeb-Umbach R. Using the turbo principle for exploiting temporal and spectral correlations in speech presence probability estimation. In: <i>38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)</i>. ; 2013:863-867. doi:<a href=\"https://doi.org/10.1109/ICASSP.2013.6637771\">10.1109/ICASSP.2013.6637771</a>","apa":"Vu, D. H. T., &#38; Haeb-Umbach, R. (2013). Using the turbo principle for exploiting temporal and spectral correlations in speech presence probability estimation. In <i>38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)</i> (pp. 863–867). <a href=\"https://doi.org/10.1109/ICASSP.2013.6637771\">https://doi.org/10.1109/ICASSP.2013.6637771</a>","chicago":"Vu, Dang Hai Tran, and Reinhold Haeb-Umbach. “Using the Turbo Principle for Exploiting Temporal and Spectral Correlations in Speech Presence Probability Estimation.” In <i>38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)</i>, 863–67, 2013. <a href=\"https://doi.org/10.1109/ICASSP.2013.6637771\">https://doi.org/10.1109/ICASSP.2013.6637771</a>.","ieee":"D. H. T. Vu and R. 
Haeb-Umbach, “Using the turbo principle for exploiting temporal and spectral correlations in speech presence probability estimation,” in <i>38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)</i>, 2013, pp. 863–867."},"user_id":"44006","keyword":["correlation methods","estimation theory","hidden Markov models","iterative methods","probability","spectral analysis","speech processing","2D HMM","SPP estimates","iterative algorithm","posterior probability estimation","spectral correlation","speech presence probability estimation","state-of-the-art SPP estimation algorithm","temporal correlation","turbo principle","two-dimensional hidden Markov model","Correlation","Decoding","Estimation","Iterative decoding","Noise","Speech","Vectors"],"doi":"10.1109/ICASSP.2013.6637771","abstract":[{"text":"In this paper we present a speech presence probability (SPP) estimation algorithmwhich exploits both temporal and spectral correlations of speech. To this end, the SPP estimation is formulated as the posterior probability estimation of the states of a two-dimensional (2D) Hidden Markov Model (HMM). We derive an iterative algorithm to decode the 2D-HMM which is based on the turbo principle. 
The experimental results show that indeed the SPP estimates improve from iteration to iteration, and further clearly outperform another state-of-the-art SPP estimation algorithm.","lang":"eng"}],"title":"Using the turbo principle for exploiting temporal and spectral correlations in speech presence probability estimation","author":[{"last_name":"Vu","full_name":"Vu, Dang Hai Tran","first_name":"Dang Hai Tran"},{"full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold","id":"242","last_name":"Haeb-Umbach"}],"date_updated":"2022-01-06T06:51:12Z","_id":"11917","page":"863-867","year":"2013","publication_identifier":{"issn":["1520-6149"]},"type":"conference","language":[{"iso":"eng"}],"status":"public","publication":"38th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2013)","date_created":"2019-07-12T05:30:45Z"},{"date_updated":"2022-01-06T06:51:08Z","_id":"11745","year":"2012","type":"conference","language":[{"iso":"eng"}],"status":"public","publication":"37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)","related_material":{"link":[{"description":"Presentation","relation":"supplementary_material","url":"https://groups.uni-paderborn.de/nt/pubs/2012/ChKrDaHa12_Talk.pdf"}]},"date_created":"2019-07-12T05:27:26Z","department":[{"_id":"54"}],"citation":{"ama":"Chinaev A, Krueger A, Tran Vu DH, Haeb-Umbach R. Improved Noise Power Spectral Density Tracking by a MAP-based Postprocessor. In: <i>37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)</i>. ; 2012.","apa":"Chinaev, A., Krueger, A., Tran Vu, D. H., &#38; Haeb-Umbach, R. (2012). Improved Noise Power Spectral Density Tracking by a MAP-based Postprocessor. In <i>37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)</i>.","ieee":"A. Chinaev, A. Krueger, D. H. Tran Vu, and R. 
Haeb-Umbach, “Improved Noise Power Spectral Density Tracking by a MAP-based Postprocessor,” in <i>37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)</i>, 2012.","chicago":"Chinaev, Aleksej, Alexander Krueger, Dang Hai Tran Vu, and Reinhold Haeb-Umbach. “Improved Noise Power Spectral Density Tracking by a MAP-Based Postprocessor.” In <i>37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)</i>, 2012.","bibtex":"@inproceedings{Chinaev_Krueger_Tran Vu_Haeb-Umbach_2012, title={Improved Noise Power Spectral Density Tracking by a MAP-based Postprocessor}, booktitle={37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)}, author={Chinaev, Aleksej and Krueger, Alexander and Tran Vu, Dang Hai and Haeb-Umbach, Reinhold}, year={2012} }","mla":"Chinaev, Aleksej, et al. “Improved Noise Power Spectral Density Tracking by a MAP-Based Postprocessor.” <i>37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012)</i>, 2012.","short":"A. Chinaev, A. Krueger, D.H. Tran Vu, R. Haeb-Umbach, in: 37th International Conference on Acoustics, Speech and Signal Processing (ICASSP 2012), 2012."},"oa":"1","user_id":"44006","main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2012/ChKrDaHa12.pdf","open_access":"1"}],"keyword":["MAP parameter estimation","noise power estimation","speech enhancement"],"abstract":[{"lang":"eng","text":"In this paper we present a novel noise power spectral density tracking algorithm and its use in single-channel speech enhancement. It has the unique feature that it is able to track the noise statistics even if speech is dominant in a given time-frequency bin. As a consequence it can follow non-stationary noise superposed by speech, even in the critical case of rising noise power. 
The algorithm requires an initial estimate of the power spectrum of speech and is thus meant to be used as a postprocessor to a first speech enhancement stage. An experimental comparison with a state-of-the-art noise tracking algorithm demonstrates lower estimation errors under low SNR conditions and smaller fluctuations of the estimated values, resulting in improved speech quality as measured by PESQ scores."}],"title":"Improved Noise Power Spectral Density Tracking by a MAP-based Postprocessor","author":[{"first_name":"Aleksej","full_name":"Chinaev, Aleksej","last_name":"Chinaev"},{"last_name":"Krueger","first_name":"Alexander","full_name":"Krueger, Alexander"},{"last_name":"Tran Vu","full_name":"Tran Vu, Dang Hai","first_name":"Dang Hai"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}]},{"title":"A Statistical Observation Model For Noisy Reverberant Speech Features and its Application to Robust ASR","author":[{"first_name":"Volker","full_name":"Leutnant, Volker","last_name":"Leutnant"},{"full_name":"Krueger, Alexander","first_name":"Alexander","last_name":"Krueger"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"abstract":[{"text":"In this work, an observation model for the joint compensation of noise and reverberation in the logarithmic mel power spectral density domain is considered. It relates the features of the noisy reverberant speech to those of the non-reverberant speech and the noise. In contrast to enhancement of features only corrupted by reverberation (reverberant features), enhancement of noisy reverberant features requires a more sophisticated model for the error introduced by the proposed observation model. 
In a first consideration, it will be shown that this error is highly dependent on the instantaneous ratio of the power of reverberant speech to the power of the noise and, moreover, sensitive to the phase between reverberant speech and noise in the short-time discrete Fourier domain. Afterwards, a statistically motivated approach will be presented allowing for the model of the observation error to be inferred from the error model previously used for the reverberation only case. Finally, the developed observation error model will be utilized in a Bayesian feature enhancement scheme, leading to improvements in word accuracy on the AURORA5 database.","lang":"eng"}],"citation":{"ama":"Leutnant V, Krueger A, Haeb-Umbach R. A Statistical Observation Model For Noisy Reverberant Speech Features and its Application to Robust ASR. In: <i>Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference On</i>. ; 2012.","apa":"Leutnant, V., Krueger, A., &#38; Haeb-Umbach, R. (2012). A Statistical Observation Model For Noisy Reverberant Speech Features and its Application to Robust ASR. In <i>Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference on</i>.","chicago":"Leutnant, Volker, Alexander Krueger, and Reinhold Haeb-Umbach. “A Statistical Observation Model For Noisy Reverberant Speech Features and Its Application to Robust ASR.” In <i>Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference On</i>, 2012.","ieee":"V. Leutnant, A. Krueger, and R. Haeb-Umbach, “A Statistical Observation Model For Noisy Reverberant Speech Features and its Application to Robust ASR,” in <i>Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference on</i>, 2012.","mla":"Leutnant, Volker, et al. 
“A Statistical Observation Model For Noisy Reverberant Speech Features and Its Application to Robust ASR.” <i>Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference On</i>, 2012.","bibtex":"@inproceedings{Leutnant_Krueger_Haeb-Umbach_2012, title={A Statistical Observation Model For Noisy Reverberant Speech Features and its Application to Robust ASR}, booktitle={Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference on}, author={Leutnant, Volker and Krueger, Alexander and Haeb-Umbach, Reinhold}, year={2012} }","short":"V. Leutnant, A. Krueger, R. Haeb-Umbach, in: Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference On, 2012."},"keyword":["Robust Automatic Speech Recognition","Bayesian feature enhancement","observation model for reverberant and noisy speech"],"main_file_link":[{"open_access":"1","url":"http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6335731"}],"oa":"1","user_id":"44006","department":[{"_id":"54"}],"date_created":"2019-07-12T05:29:44Z","publication":"Signal Processing, Communications and Computing (ICSPCC), 2012 IEEE International Conference on","language":[{"iso":"eng"}],"year":"2012","type":"conference","status":"public","_id":"11864","date_updated":"2022-01-06T06:51:11Z"},{"user_id":"44006","oa":"1","keyword":["acoustical transfer function ratio","adaptive eigenvector tracking","array signal processing","beamformer design","blocking matrix","eigenvalues and eigenfunctions","eigenvector-based transfer function ratios estimation","generalized sidelobe canceler","interference reduction","iterative methods","power iteration method","reduced speech distortions","reverberant enclosure","reverberation","speech enhancement","stationary noise"],"main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2011/KrWaHa11.pdf"}],"doi":"10.1109/TASL.2010.2047324","abstract":[{"text":"In this paper, we present a novel 
blocking matrix and fixed beamformer design for a generalized sidelobe canceler for speech enhancement in a reverberant enclosure. They are based on a new method for estimating the acoustical transfer function ratios in the presence of stationary noise. The estimation method relies on solving a generalized eigenvalue problem in each frequency bin. An adaptive eigenvector tracking utilizing the power iteration method is employed and shown to achieve a high convergence speed. Simulation results demonstrate that the proposed beamformer leads to better noise and interference reduction and reduced speech distortions compared to other blocking matrix designs from the literature.","lang":"eng"}],"title":"Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation","issue":"1","page":"206-219","volume":19,"type":"journal_article","publication":"IEEE Transactions on Audio, Speech, and Language Processing","department":[{"_id":"54"}],"citation":{"apa":"Krueger, A., Warsitz, E., &#38; Haeb-Umbach, R. (2011). Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>19</i>(1), 206–219. <a href=\"https://doi.org/10.1109/TASL.2010.2047324\">https://doi.org/10.1109/TASL.2010.2047324</a>","ama":"Krueger A, Warsitz E, Haeb-Umbach R. Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2011;19(1):206-219. doi:<a href=\"https://doi.org/10.1109/TASL.2010.2047324\">10.1109/TASL.2010.2047324</a>","ieee":"A. Krueger, E. Warsitz, and R. Haeb-Umbach, “Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 19, no. 1, pp. 
206–219, 2011.","chicago":"Krueger, Alexander, Ernst Warsitz, and Reinhold Haeb-Umbach. “Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 19, no. 1 (2011): 206–19. <a href=\"https://doi.org/10.1109/TASL.2010.2047324\">https://doi.org/10.1109/TASL.2010.2047324</a>.","bibtex":"@article{Krueger_Warsitz_Haeb-Umbach_2011, title={Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation}, volume={19}, DOI={<a href=\"https://doi.org/10.1109/TASL.2010.2047324\">10.1109/TASL.2010.2047324</a>}, number={1}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Krueger, Alexander and Warsitz, Ernst and Haeb-Umbach, Reinhold}, year={2011}, pages={206–219} }","mla":"Krueger, Alexander, et al. “Speech Enhancement With a GSC-Like Structure Employing Eigenvector-Based Transfer Function Ratios Estimation.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 19, no. 1, 2011, pp. 206–19, doi:<a href=\"https://doi.org/10.1109/TASL.2010.2047324\">10.1109/TASL.2010.2047324</a>.","short":"A. Krueger, E. Warsitz, R. 
Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 19 (2011) 206–219."},"intvolume":"        19","author":[{"full_name":"Krueger, Alexander","first_name":"Alexander","last_name":"Krueger"},{"first_name":"Ernst","full_name":"Warsitz, Ernst","last_name":"Warsitz"},{"first_name":"Reinhold","full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach"}],"date_updated":"2022-01-06T06:51:11Z","_id":"11850","year":"2011","language":[{"iso":"eng"}],"status":"public","date_created":"2019-07-12T05:29:28Z"},{"extern":"1","title":"High sodium chloride intake exacerbates immobilization-induced bone resorption and protein losses","abstract":[{"lang":"eng","text":"<jats:p>We examined, in immobilization, the effect of a diet high in sodium chloride (NaCl) on bone markers, nitrogen balance, and acid-base status. Eight healthy male test subjects participated in a 14-day head-down-tilt bed rest (HDBR) study. During the bed rest period they received, in a randomized crossover design, a high (7.7 meq Na<jats:sup>+</jats:sup>/kg body wt per day) and a low (0.7 meq Na<jats:sup>+</jats:sup>/kg body wt per day) NaCl diet. As expected, 24-h excretion of urinary calcium was significantly greater in the high-NaCl-intake HDBR phase than in the low-NaCl-intake HDBR phase ( P &lt; 0.001). High NaCl intake caused a 43–50% greater excretion of the bone resorption markers COOH- (CTX) and NH<jats:sub>2</jats:sub>- (NTX) terminal telopeptide of type I collagen in HDBR than low NaCl in HDBR (CTX/NTX: P &lt; 0.001). Serum concentrations of the bone formation markers bone-specific alkaline phosphatase (bAP) and NH<jats:sub>2</jats:sub>-terminal propeptide of type I procollagen (PINP) were identical in both NaCl intake phases. High NaCl intake led to a more negative nitrogen balance in HDBR ( P &lt; 0.001). 
Changes were accompanied by increased serum chloride concentration ( P = 0.008), reduced blood bicarbonate ( P = 0.017), and base excess ( P = 0.009) whereas net acid excretion was lower during high than during low NaCl intake in immobilization ( P &lt; 0.001). High NaCl intake during immobilization exacerbates disuse-induced bone and muscle loss by causing further protein wasting and an increase in bone resorption. Changes in the acid-base status, mainly caused by disturbances in electrolyte metabolism, seem to determine NaCl-induced degradation processes.</jats:p>"}],"doi":"10.1152/japplphysiol.00454.2011","user_id":"89838","keyword":["Physiology (medical)","Physiology"],"publication":"Journal of Applied Physiology","type":"journal_article","volume":111,"page":"537-542","issue":"2","author":[{"first_name":"Petra","full_name":"Frings-Meuthen, Petra","last_name":"Frings-Meuthen"},{"first_name":"Judith","full_name":"Bühlmeier, Judith","id":"89838","last_name":"Bühlmeier"},{"first_name":"Natalie","full_name":"Baecker, Natalie","last_name":"Baecker"},{"last_name":"Stehle","full_name":"Stehle, Peter","first_name":"Peter"},{"last_name":"Fimmers","full_name":"Fimmers, Rolf","first_name":"Rolf"},{"last_name":"May","first_name":"Francisca","full_name":"May, Francisca"},{"last_name":"Kluge","first_name":"Goetz","full_name":"Kluge, Goetz"},{"full_name":"Heer, Martina","first_name":"Martina","last_name":"Heer"}],"intvolume":"       111","publication_status":"published","citation":{"ieee":"P. Frings-Meuthen <i>et al.</i>, “High sodium chloride intake exacerbates immobilization-induced bone resorption and protein losses,” <i>Journal of Applied Physiology</i>, vol. 111, no. 2, pp. 537–542, 2011, doi: <a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">10.1152/japplphysiol.00454.2011</a>.","chicago":"Frings-Meuthen, Petra, Judith Bühlmeier, Natalie Baecker, Peter Stehle, Rolf Fimmers, Francisca May, Goetz Kluge, and Martina Heer. 
“High Sodium Chloride Intake Exacerbates Immobilization-Induced Bone Resorption and Protein Losses.” <i>Journal of Applied Physiology</i> 111, no. 2 (2011): 537–42. <a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">https://doi.org/10.1152/japplphysiol.00454.2011</a>.","ama":"Frings-Meuthen P, Bühlmeier J, Baecker N, et al. High sodium chloride intake exacerbates immobilization-induced bone resorption and protein losses. <i>Journal of Applied Physiology</i>. 2011;111(2):537-542. doi:<a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">10.1152/japplphysiol.00454.2011</a>","apa":"Frings-Meuthen, P., Bühlmeier, J., Baecker, N., Stehle, P., Fimmers, R., May, F., Kluge, G., &#38; Heer, M. (2011). High sodium chloride intake exacerbates immobilization-induced bone resorption and protein losses. <i>Journal of Applied Physiology</i>, <i>111</i>(2), 537–542. <a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">https://doi.org/10.1152/japplphysiol.00454.2011</a>","short":"P. Frings-Meuthen, J. Bühlmeier, N. Baecker, P. Stehle, R. Fimmers, F. May, G. Kluge, M. Heer, Journal of Applied Physiology 111 (2011) 537–542.","bibtex":"@article{Frings-Meuthen_Bühlmeier_Baecker_Stehle_Fimmers_May_Kluge_Heer_2011, title={High sodium chloride intake exacerbates immobilization-induced bone resorption and protein losses}, volume={111}, DOI={<a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">10.1152/japplphysiol.00454.2011</a>}, number={2}, journal={Journal of Applied Physiology}, publisher={American Physiological Society}, author={Frings-Meuthen, Petra and Bühlmeier, Judith and Baecker, Natalie and Stehle, Peter and Fimmers, Rolf and May, Francisca and Kluge, Goetz and Heer, Martina}, year={2011}, pages={537–542} }","mla":"Frings-Meuthen, Petra, et al. “High Sodium Chloride Intake Exacerbates Immobilization-Induced Bone Resorption and Protein Losses.” <i>Journal of Applied Physiology</i>, vol. 111, no. 2, American Physiological Society, 2011, pp. 
537–42, doi:<a href=\"https://doi.org/10.1152/japplphysiol.00454.2011\">10.1152/japplphysiol.00454.2011</a>."},"publisher":"American Physiological Society","date_created":"2022-09-15T09:37:29Z","status":"public","year":"2011","publication_identifier":{"issn":["8750-7587","1522-1601"]},"language":[{"iso":"eng"}],"_id":"33400","date_updated":"2022-09-15T09:43:45Z"},{"title":"Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot","doi":"10.1075/is.12.1.06fis","abstract":[{"text":"It has been proposed that the design of robots might benefit from interactions that are similar to caregiver–child interactions, which is tailored to children’s respective capacities to a high degree. However, so far little is known about how people adapt their tutoring behaviour to robots and whether robots can evoke input that is similar to child-directed interaction. The paper presents detailed analyses of speakers’ linguistic and non-linguistic behaviour, such as action demonstration, in two comparable situations: In one experiment, parents described and explained to their nonverbal infants the use of certain everyday objects; in the other experiment, participants tutored a simulated robot on the same objects. 
The results, which show considerable differences between the two situations on almost all measures, are discussed in the light of the computer-as-social-actor paradigm and the register hypothesis.","lang":"eng"}],"keyword":["human–robot interaction (HRI)","social communication","register theory","motionese","robotese","child-directed speech (CDS)","motherese","mindless transfer","computers-as-social-actors"],"user_id":"14931","publication":"Interaction Studies","type":"journal_article","volume":12,"page":"134-161","issue":"1","author":[{"first_name":"Kerstin","full_name":"Fischer, Kerstin","last_name":"Fischer"},{"full_name":"Foth, Kilian","first_name":"Kilian","last_name":"Foth"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"}],"intvolume":"        12","citation":{"short":"K. Fischer, K. Foth, K. Rohlfing, B. Wrede, Interaction Studies 12 (2011) 134–161.","bibtex":"@article{Fischer_Foth_Rohlfing_Wrede_2011, title={Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot}, volume={12}, DOI={<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>}, number={1}, journal={Interaction Studies}, publisher={John Benjamins Publishing Company}, author={Fischer, Kerstin and Foth, Kilian and Rohlfing, Katharina and Wrede, Britta}, year={2011}, pages={134–161} }","mla":"Fischer, Kerstin, et al. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i>, vol. 12, no. 1, John Benjamins Publishing Company, 2011, pp. 134–61, doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","ieee":"K. Fischer, K. Foth, K. Rohlfing, and B. Wrede, “Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot,” <i>Interaction Studies</i>, vol. 12, no. 1, pp. 
134–161, 2011, doi: <a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>.","chicago":"Fischer, Kerstin, Kilian Foth, Katharina Rohlfing, and Britta Wrede. “Mindful Tutors: Linguistic Choice and Action Demonstration in Speech to Infants and a Simulated Robot.” <i>Interaction Studies</i> 12, no. 1 (2011): 134–61. <a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>.","apa":"Fischer, K., Foth, K., Rohlfing, K., &#38; Wrede, B. (2011). Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>, <i>12</i>(1), 134–161. <a href=\"https://doi.org/10.1075/is.12.1.06fis\">https://doi.org/10.1075/is.12.1.06fis</a>","ama":"Fischer K, Foth K, Rohlfing K, Wrede B. Mindful tutors: Linguistic choice and action demonstration in speech to infants and a simulated robot. <i>Interaction Studies</i>. 2011;12(1):134-161. doi:<a href=\"https://doi.org/10.1075/is.12.1.06fis\">10.1075/is.12.1.06fis</a>"},"department":[{"_id":"749"}],"publisher":"John Benjamins Publishing Company","date_created":"2020-06-24T13:01:57Z","status":"public","language":[{"iso":"eng"}],"publication_identifier":{"issn":["1572-0381"]},"year":"2011","_id":"17233","date_updated":"2023-02-01T12:56:04Z"},{"type":"journal_article","publication":"IEEE Transactions on Audio, Speech, and Language Processing","issue":"7","volume":18,"page":"1692-1707","doi":"10.1109/TASL.2010.2049684","abstract":[{"text":"In this paper, we present a new technique for automatic speech recognition (ASR) in reverberant environments. Our approach is aimed at the enhancement of the logarithmic Mel power spectrum, which is computed at an intermediate stage to obtain the widely used Mel frequency cepstral coefficients (MFCCs). Given the reverberant logarithmic Mel power spectral coefficients (LMPSCs), a minimum mean square error estimate of the clean LMPSCs is computed by carrying out Bayesian inference. 
We employ switching linear dynamical models as an a priori model for the dynamics of the clean LMPSCs. Further, we derive a stochastic observation model which relates the clean to the reverberant LMPSCs through a simplified model of the room impulse response (RIR). This model requires only two parameters, namely RIR energy and reverberation time, which can be estimated from the captured microphone signal. The performance of the proposed enhancement technique is studied on the AURORA5 database and compared to that of constrained maximum-likelihood linear regression (CMLLR). It is shown by experimental results that our approach significantly outperforms CMLLR and that up to 80\\% of the errors caused by the reverberation are recovered. In addition to the fact that the approach is compatible with the standard MFCC feature vectors, it leaves the ASR back-end unchanged. It is of moderate computational complexity and suitable for real time applications.","lang":"eng"}],"title":"Model-Based Feature Enhancement for Reverberant Speech Recognition","keyword":["ASR","AURORA5 database","automatic speech recognition","Bayesian inference","belief networks","CMLLR","computational complexity","constrained maximum likelihood linear regression","least mean squares methods","LMPSC computation","logarithmic Mel power spectrum","maximum likelihood estimation","Mel frequency cepstral coefficients","MFCC feature vectors","microphone signal","minimum mean square error estimation","model-based feature enhancement","regression analysis","reverberant speech recognition","reverberation","RIR energy","room impulse response","speech recognition","stochastic observation model","stochastic processes"],"main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2010/KrHa10.pdf","open_access":"1"}],"oa":"1","user_id":"44006","status":"public","language":[{"iso":"eng"}],"year":"2010","date_created":"2019-07-12T05:29:23Z","date_updated":"2022-01-06T06:51:11Z","_id":"11846","intvolume":"        
18","author":[{"first_name":"Alexander","full_name":"Krueger, Alexander","last_name":"Krueger"},{"full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold","id":"242","last_name":"Haeb-Umbach"}],"department":[{"_id":"54"}],"citation":{"ama":"Krueger A, Haeb-Umbach R. Model-Based Feature Enhancement for Reverberant Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2010;18(7):1692-1707. doi:<a href=\"https://doi.org/10.1109/TASL.2010.2049684\">10.1109/TASL.2010.2049684</a>","apa":"Krueger, A., &#38; Haeb-Umbach, R. (2010). Model-Based Feature Enhancement for Reverberant Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>18</i>(7), 1692–1707. <a href=\"https://doi.org/10.1109/TASL.2010.2049684\">https://doi.org/10.1109/TASL.2010.2049684</a>","ieee":"A. Krueger and R. Haeb-Umbach, “Model-Based Feature Enhancement for Reverberant Speech Recognition,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 18, no. 7, pp. 1692–1707, 2010.","chicago":"Krueger, Alexander, and Reinhold Haeb-Umbach. “Model-Based Feature Enhancement for Reverberant Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 18, no. 7 (2010): 1692–1707. <a href=\"https://doi.org/10.1109/TASL.2010.2049684\">https://doi.org/10.1109/TASL.2010.2049684</a>.","bibtex":"@article{Krueger_Haeb-Umbach_2010, title={Model-Based Feature Enhancement for Reverberant Speech Recognition}, volume={18}, DOI={<a href=\"https://doi.org/10.1109/TASL.2010.2049684\">10.1109/TASL.2010.2049684</a>}, number={7}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Krueger, Alexander and Haeb-Umbach, Reinhold}, year={2010}, pages={1692–1707} }","mla":"Krueger, Alexander, and Reinhold Haeb-Umbach. “Model-Based Feature Enhancement for Reverberant Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 18, no. 7, 2010, pp. 
1692–707, doi:<a href=\"https://doi.org/10.1109/TASL.2010.2049684\">10.1109/TASL.2010.2049684</a>.","short":"A. Krueger, R. Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 18 (2010) 1692–1707."}},{"status":"public","year":"2010","type":"conference","language":[{"iso":"eng"}],"publication":"IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)","date_created":"2019-07-12T05:30:40Z","date_updated":"2022-01-06T06:51:12Z","_id":"11913","page":"241-244","doi":"10.1109/ICASSP.2010.5495994","abstract":[{"text":"In this paper we propose to employ directional statistics in a complex vector space to approach the problem of blind speech separation in the presence of spatially correlated noise. We interpret the values of the short time Fourier transform of the microphone signals to be draws from a mixture of complex Watson distributions, a probabilistic model which naturally accounts for spatial aliasing. The parameters of the density are related to the a priori source probabilities, the power of the sources and the transfer function ratios from sources to sensors. Estimation formulas are derived for these parameters by employing the Expectation Maximization (EM) algorithm. The E-step corresponds to the estimation of the source presence probabilities for each time-frequency bin, while the M-step leads to a maximum signal-to-noise ratio (MaxSNR) beamformer in the presence of uncertainty about the source activity. 
Experimental results are reported for an implementation in a generalized sidelobe canceller (GSC) like spatial beamforming configuration for 3 speech sources with significant coherent noise in reverberant environments, demonstrating the usefulness of the novel modeling framework.","lang":"eng"}],"author":[{"last_name":"Tran Vu","full_name":"Tran Vu, Dang Hai","first_name":"Dang Hai"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"title":"Blind speech separation employing directional statistics in an Expectation Maximization framework","department":[{"_id":"54"}],"user_id":"44006","oa":"1","main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2010/DaHa10-2.pdf","open_access":"1"}],"keyword":["array signal processing","blind source separation","blind speech separation","complex vector space","complex Watson distribution","directional statistics","expectation-maximisation algorithm","expectation maximization algorithm","Fourier transform","Fourier transforms","generalized sidelobe canceller","interference suppression","maximum signal-to-noise ratio beamformer","microphone signal","probabilistic model","spatial aliasing","spatial beamforming configuration","speech enhancement","statistical distributions"],"citation":{"ieee":"D. H. Tran Vu and R. Haeb-Umbach, “Blind speech separation employing directional statistics in an Expectation Maximization framework,” in <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)</i>, 2010, pp. 241–244.","chicago":"Tran Vu, Dang Hai, and Reinhold Haeb-Umbach. “Blind Speech Separation Employing Directional Statistics in an Expectation Maximization Framework.” In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)</i>, 241–44, 2010. <a href=\"https://doi.org/10.1109/ICASSP.2010.5495994\">https://doi.org/10.1109/ICASSP.2010.5495994</a>.","ama":"Tran Vu DH, Haeb-Umbach R. 
Blind speech separation employing directional statistics in an Expectation Maximization framework. In: <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)</i>. ; 2010:241-244. doi:<a href=\"https://doi.org/10.1109/ICASSP.2010.5495994\">10.1109/ICASSP.2010.5495994</a>","apa":"Tran Vu, D. H., &#38; Haeb-Umbach, R. (2010). Blind speech separation employing directional statistics in an Expectation Maximization framework. In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)</i> (pp. 241–244). <a href=\"https://doi.org/10.1109/ICASSP.2010.5495994\">https://doi.org/10.1109/ICASSP.2010.5495994</a>","short":"D.H. Tran Vu, R. Haeb-Umbach, in: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010), 2010, pp. 241–244.","bibtex":"@inproceedings{Tran Vu_Haeb-Umbach_2010, title={Blind speech separation employing directional statistics in an Expectation Maximization framework}, DOI={<a href=\"https://doi.org/10.1109/ICASSP.2010.5495994\">10.1109/ICASSP.2010.5495994</a>}, booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)}, author={Tran Vu, Dang Hai and Haeb-Umbach, Reinhold}, year={2010}, pages={241–244} }","mla":"Tran Vu, Dang Hai, and Reinhold Haeb-Umbach. “Blind Speech Separation Employing Directional Statistics in an Expectation Maximization Framework.” <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2010)</i>, 2010, pp. 
241–44, doi:<a href=\"https://doi.org/10.1109/ICASSP.2010.5495994\">10.1109/ICASSP.2010.5495994</a>."}},{"quality_controlled":"1","publication":"IEEE Journal of Selected Topics in Signal Processing","type":"journal_article","volume":4,"page":"845-856","issue":"5","title":"Online Diarization of Streaming Audio-Visual Data for Smart Environments","doi":"10.1109/JSTSP.2010.2050519","abstract":[{"lang":"eng","text":"For an environment to be perceived as being smart, contextual information has to be gathered to adapt the system's behavior and its interface towards the user. Being a rich source of context information speech can be acquired unobtrusively by microphone arrays and then processed to extract information about the user and his environment. In this paper, a system for joint temporal segmentation, speaker localization, and identification is presented, which is supported by face identification from video data obtained from a steerable camera. Special attention is paid to latency aspects and online processing capabilities, as they are important for the application under investigation, namely ambient communication. It describes the vision of terminal-less, session-less and multi-modal telecommunication with remote partners, where the user can move freely within his home while the communication follows him. 
The speaker diarization serves as a context source, which has been integrated in a service-oriented middleware architecture and provided to the application to select the most appropriate I/O device and to steer the camera towards the speaker during ambient communication."}],"user_id":"460","oa":"1","main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2010/ScHa10.pdf"}],"keyword":["audio streaming","audio visual data streaming","context information speech","face identification","face recognition","image segmentation","middleware","multimodal telecommunication","online diarization","service oriented middleware architecture","sessionless telecommunication","software architecture","speaker identification","speaker localization","speaker recognition","steerable camera","telecommunication computing","temporal segmentation","terminal-less telecommunication","video streaming"],"date_created":"2019-07-12T05:30:16Z","status":"public","year":"2010","language":[{"iso":"eng"}],"_id":"11892","date_updated":"2023-10-26T08:10:18Z","author":[{"last_name":"Schmalenstroeer","id":"460","full_name":"Schmalenstroeer, Joerg","first_name":"Joerg"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"intvolume":"         4","citation":{"ieee":"J. Schmalenstroeer and R. Haeb-Umbach, “Online Diarization of Streaming Audio-Visual Data for Smart Environments,” <i>IEEE Journal of Selected Topics in Signal Processing</i>, vol. 4, no. 5, pp. 845–856, 2010, doi: <a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>.","chicago":"Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization of Streaming Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected Topics in Signal Processing</i> 4, no. 5 (2010): 845–56. <a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">https://doi.org/10.1109/JSTSP.2010.2050519</a>.","ama":"Schmalenstroeer J, Haeb-Umbach R. 
Online Diarization of Streaming Audio-Visual Data for Smart Environments. <i>IEEE Journal of Selected Topics in Signal Processing</i>. 2010;4(5):845-856. doi:<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>","apa":"Schmalenstroeer, J., &#38; Haeb-Umbach, R. (2010). Online Diarization of Streaming Audio-Visual Data for Smart Environments. <i>IEEE Journal of Selected Topics in Signal Processing</i>, <i>4</i>(5), 845–856. <a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">https://doi.org/10.1109/JSTSP.2010.2050519</a>","short":"J. Schmalenstroeer, R. Haeb-Umbach, IEEE Journal of Selected Topics in Signal Processing 4 (2010) 845–856.","bibtex":"@article{Schmalenstroeer_Haeb-Umbach_2010, title={Online Diarization of Streaming Audio-Visual Data for Smart Environments}, volume={4}, DOI={<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>}, number={5}, journal={IEEE Journal of Selected Topics in Signal Processing}, author={Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}, year={2010}, pages={845–856} }","mla":"Schmalenstroeer, Joerg, and Reinhold Haeb-Umbach. “Online Diarization of Streaming Audio-Visual Data for Smart Environments.” <i>IEEE Journal of Selected Topics in Signal Processing</i>, vol. 4, no. 5, 2010, pp. 845–56, doi:<a href=\"https://doi.org/10.1109/JSTSP.2010.2050519\">10.1109/JSTSP.2010.2050519</a>."},"department":[{"_id":"54"}]},{"abstract":[{"text":"In automatic speech recognition, hidden Markov models (HMMs) are commonly used for speech decoding, while switching linear dynamic models (SLDMs) can be employed for a preceding model-based speech feature enhancement. In this paper, these model types are combined in order to obtain a novel iterative speech feature enhancement and recognition architecture. It is shown that speech feature enhancement with SLDMs can be improved by feeding back information from the HMM to the enhancement stage. 
Two different feedback structures are derived. In the first, the posteriors of the HMM states are used to control the model probabilities of the SLDMs, while in the second they are employed to directly influence the estimate of the speech feature distribution. Both approaches lead to improvements in recognition accuracy both on the AURORA2 and AURORA4 databases compared to non-iterative speech feature enhancement with SLDMs. It is also shown that a combination with uncertainty decoding further enhances performance.","lang":"eng"}],"doi":"10.1109/TASL.2009.2014894","title":"Approaches to Iterative Speech Feature Enhancement and Recognition","keyword":["AURORA2 databases","AURORA4 databases","automatic speech recognition","feedback structures","hidden Markov models","HMM","iterative methods","iterative speech feature enhancement","model probabilities","speech decoding","speech enhancement","speech feature distribution","speech recognition","switching linear dynamic models"],"main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2009/WiHa09-1.pdf","open_access":"1"}],"oa":"1","user_id":"44006","type":"journal_article","publication":"IEEE Transactions on Audio, Speech, and Language Processing","issue":"5","volume":17,"page":"974-984","intvolume":"        17","author":[{"first_name":"Stefan","full_name":"Windmann, Stefan","last_name":"Windmann"},{"last_name":"Haeb-Umbach","id":"242","first_name":"Reinhold","full_name":"Haeb-Umbach, Reinhold"}],"department":[{"_id":"54"}],"citation":{"ama":"Windmann S, Haeb-Umbach R. Approaches to Iterative Speech Feature Enhancement and Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2009;17(5):974-984. doi:<a href=\"https://doi.org/10.1109/TASL.2009.2014894\">10.1109/TASL.2009.2014894</a>","apa":"Windmann, S., &#38; Haeb-Umbach, R. (2009). Approaches to Iterative Speech Feature Enhancement and Recognition. 
<i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>17</i>(5), 974–984. <a href=\"https://doi.org/10.1109/TASL.2009.2014894\">https://doi.org/10.1109/TASL.2009.2014894</a>","chicago":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Approaches to Iterative Speech Feature Enhancement and Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 17, no. 5 (2009): 974–84. <a href=\"https://doi.org/10.1109/TASL.2009.2014894\">https://doi.org/10.1109/TASL.2009.2014894</a>.","ieee":"S. Windmann and R. Haeb-Umbach, “Approaches to Iterative Speech Feature Enhancement and Recognition,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 17, no. 5, pp. 974–984, 2009.","mla":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Approaches to Iterative Speech Feature Enhancement and Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 17, no. 5, 2009, pp. 974–84, doi:<a href=\"https://doi.org/10.1109/TASL.2009.2014894\">10.1109/TASL.2009.2014894</a>.","bibtex":"@article{Windmann_Haeb-Umbach_2009, title={Approaches to Iterative Speech Feature Enhancement and Recognition}, volume={17}, DOI={<a href=\"https://doi.org/10.1109/TASL.2009.2014894\">10.1109/TASL.2009.2014894</a>}, number={5}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Windmann, Stefan and Haeb-Umbach, Reinhold}, year={2009}, pages={974–984} }","short":"S. Windmann, R. 
Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 17 (2009) 974–984."},"status":"public","language":[{"iso":"eng"}],"year":"2009","date_created":"2019-07-12T05:31:08Z","date_updated":"2022-01-06T06:51:12Z","_id":"11937"},{"date_created":"2019-07-12T05:31:09Z","year":"2009","language":[{"iso":"eng"}],"status":"public","_id":"11938","date_updated":"2022-01-06T06:51:12Z","author":[{"last_name":"Windmann","full_name":"Windmann, Stefan","first_name":"Stefan"},{"full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold","last_name":"Haeb-Umbach","id":"242"}],"intvolume":"        17","citation":{"short":"S. Windmann, R. Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 17 (2009) 1577–1590.","mla":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 17, no. 8, 2009, pp. 1577–90, doi:<a href=\"https://doi.org/10.1109/TASL.2009.2023172\">10.1109/TASL.2009.2023172</a>.","bibtex":"@article{Windmann_Haeb-Umbach_2009, title={Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition}, volume={17}, DOI={<a href=\"https://doi.org/10.1109/TASL.2009.2023172\">10.1109/TASL.2009.2023172</a>}, number={8}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Windmann, Stefan and Haeb-Umbach, Reinhold}, year={2009}, pages={1577–1590} }","chicago":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 17, no. 8 (2009): 1577–90. <a href=\"https://doi.org/10.1109/TASL.2009.2023172\">https://doi.org/10.1109/TASL.2009.2023172</a>.","ieee":"S. Windmann and R. 
Haeb-Umbach, “Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 17, no. 8, pp. 1577–1590, 2009.","ama":"Windmann S, Haeb-Umbach R. Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2009;17(8):1577-1590. doi:<a href=\"https://doi.org/10.1109/TASL.2009.2023172\">10.1109/TASL.2009.2023172</a>","apa":"Windmann, S., &#38; Haeb-Umbach, R. (2009). Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>17</i>(8), 1577–1590. <a href=\"https://doi.org/10.1109/TASL.2009.2023172\">https://doi.org/10.1109/TASL.2009.2023172</a>"},"department":[{"_id":"54"}],"publication":"IEEE Transactions on Audio, Speech, and Language Processing","type":"journal_article","page":"1577-1590","volume":17,"issue":"8","title":"Parameter Estimation of a State-Space Model of Noise for Robust Speech Recognition","doi":"10.1109/TASL.2009.2023172","abstract":[{"text":"In this paper, parameter estimation of a state-space model of noise or noisy speech cepstra is investigated. A blockwise EM algorithm is derived for the estimation of the state and observation noise covariance from noise-only input data. It is supposed to be used during the offline training mode of a speech recognizer. Further a sequential online EM algorithm is developed to adapt the observation noise covariance on noisy speech cepstra at its input. The estimated parameters are then used in model-based speech feature enhancement for noise-robust automatic speech recognition. 
Experiments on the AURORA4 database lead to improved recognition results with a linear state model compared to the assumption of stationary noise.","lang":"eng"}],"oa":"1","user_id":"44006","keyword":["AURORA4 database","blockwise EM algorithm","covariance analysis","linear state model","noise covariance","noise-robust automatic speech recognition","noisy speech cepstra","offline training mode","parameter estimation","speech recognition","speech recognition equipment","speech recognizer","state-space methods","state-space model"],"main_file_link":[{"url":"https://groups.uni-paderborn.de/nt/pubs/2009/WiHa09-2.pdf","open_access":"1"}]},{"abstract":[{"lang":"eng","text":"In developmental research, tutoring behavior has been identified as scaffolding infants' learning processes. It has been defined in terms of child-directed speech (Motherese), child-directed motion (Motionese), and contingency. In the field of developmental robotics, research often assumes that in human-robot interaction (HRI), robots are treated similar to infants, because their immature cognitive capabilities benefit from this behavior. However, according to our knowledge, it has barely been studied whether this is true and how exactly humans alter their behavior towards a robotic interaction partner. In this paper, we present results concerning the acceptance of a robotic agent in a social learning scenario obtained via comparison to adults and 8-11 months old infants in equal conditions. These results constitute an important empirical basis for making use of tutoring behavior in social robotics. In our study, we performed a detailed multimodal analysis of HRI in a tutoring situation using the example of a robot simulation equipped with a bottom-up saliency-based attention model. 
Our results reveal significant differences in hand movement velocity, motion pauses, range of motion, and eye gaze suggesting that for example adults decrease their hand movement velocity in an Adult-Child Interaction (ACI), opposed to an Adult-Adult Interaction (AAI) and this decrease is even higher in the Adult-Robot Interaction (ARI). We also found important differences between ACI and ARI in how the behavior is modified over time as the interaction unfolds. These findings indicate the necessity of integrating top-down feedback structures into a bottom-up system for robots to be fully accepted as interaction partners."}],"doi":"10.1109/DEVLRN.2009.5175516","title":"People modify their tutoring behavior in robot-directed interaction for action learning","author":[{"last_name":"Vollmer","first_name":"Anna-Lisa","full_name":"Vollmer, Anna-Lisa"},{"full_name":"Lohan, Katrin Solveig","first_name":"Katrin Solveig","last_name":"Lohan"},{"first_name":"Kerstin","full_name":"Fischer, Kerstin","last_name":"Fischer"},{"first_name":"Yukie","full_name":"Nagai, Yukie","last_name":"Nagai"},{"first_name":"Karola","full_name":"Pitsch, Karola","last_name":"Pitsch"},{"first_name":"Jannik","full_name":"Fritsch, Jannik","last_name":"Fritsch"},{"full_name":"Rohlfing, Katharina","first_name":"Katharina","last_name":"Rohlfing","id":"50352"},{"last_name":"Wrede","first_name":"Britta","full_name":"Wrede, Britta"}],"department":[{"_id":"749"}],"citation":{"short":"A.-L. Vollmer, K.S. Lohan, K. Fischer, Y. Nagai, K. Pitsch, J. Fritsch, K. Rohlfing, B. Wrede, in: Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning, IEEE, 2009, pp. 
1–6.","bibtex":"@inproceedings{Vollmer_Lohan_Fischer_Nagai_Pitsch_Fritsch_Rohlfing_Wrede_2009, title={People modify their tutoring behavior in robot-directed interaction for action learning}, DOI={<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>}, booktitle={Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning}, publisher={IEEE}, author={Vollmer, Anna-Lisa and Lohan, Katrin Solveig and Fischer, Kerstin and Nagai, Yukie and Pitsch, Karola and Fritsch, Jannik and Rohlfing, Katharina and Wrede, Britta}, year={2009}, pages={1–6} }","mla":"Vollmer, Anna-Lisa, et al. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, IEEE, 2009, pp. 1–6, doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>.","ieee":"A.-L. Vollmer <i>et al.</i>, “People modify their tutoring behavior in robot-directed interaction for action learning,” in <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 2009, pp. 1–6, doi: <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>.","chicago":"Vollmer, Anna-Lisa, Katrin Solveig Lohan, Kerstin Fischer, Yukie Nagai, Karola Pitsch, Jannik Fritsch, Katharina Rohlfing, and Britta Wrede. “People Modify Their Tutoring Behavior in Robot-Directed Interaction for Action Learning.” In <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. IEEE, 2009. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>.","apa":"Vollmer, A.-L., Lohan, K. S., Fischer, K., Nagai, Y., Pitsch, K., Fritsch, J., Rohlfing, K., &#38; Wrede, B. (2009). 
People modify their tutoring behavior in robot-directed interaction for action learning. <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>, 1–6. <a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">https://doi.org/10.1109/DEVLRN.2009.5175516</a>","ama":"Vollmer A-L, Lohan KS, Fischer K, et al. People modify their tutoring behavior in robot-directed interaction for action learning. In: <i>Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning</i>. IEEE; 2009:1-6. doi:<a href=\"https://doi.org/10.1109/DEVLRN.2009.5175516\">10.1109/DEVLRN.2009.5175516</a>"},"user_id":"14931","keyword":["robot simulation","hand movement velocity","robotic interaction partner","robotic agent","robot-directed interaction","multimodal analysis","Motionese","Motherese","intelligent tutoring systems","immature cognitive capability","human computer interaction","eye gaze","child-directed speech","child-directed motion","bottom-up system","bottom-up saliency-based attention model","adult-robot interaction","adult-child interaction","adult-adult interaction","human-robot interaction","action learning","social learning scenario","social robotics","software agents","top-down feedback structures","tutoring behavior"],"type":"conference","year":"2009","language":[{"iso":"eng"}],"status":"public","publication":"Development and Learning, 2009. ICDL 2009. IEEE 8th International Conference on Development and Learning","date_created":"2020-06-24T13:02:43Z","publisher":"IEEE","date_updated":"2023-02-01T13:06:43Z","page":"1-6","_id":"17272"},{"citation":{"ieee":"V. Ion and R. Haeb-Umbach, “A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition,” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 16, no. 5, pp. 1047–1060, 2008.","chicago":"Ion, Valentin, and Reinhold Haeb-Umbach. 
“A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i> 16, no. 5 (2008): 1047–60. <a href=\"https://doi.org/10.1109/TASL.2008.925879\">https://doi.org/10.1109/TASL.2008.925879</a>.","apa":"Ion, V., &#38; Haeb-Umbach, R. (2008). A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, <i>16</i>(5), 1047–1060. <a href=\"https://doi.org/10.1109/TASL.2008.925879\">https://doi.org/10.1109/TASL.2008.925879</a>","ama":"Ion V, Haeb-Umbach R. A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition. <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. 2008;16(5):1047-1060. doi:<a href=\"https://doi.org/10.1109/TASL.2008.925879\">10.1109/TASL.2008.925879</a>","short":"V. Ion, R. Haeb-Umbach, IEEE Transactions on Audio, Speech, and Language Processing 16 (2008) 1047–1060.","bibtex":"@article{Ion_Haeb-Umbach_2008, title={A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition}, volume={16}, DOI={<a href=\"https://doi.org/10.1109/TASL.2008.925879\">10.1109/TASL.2008.925879</a>}, number={5}, journal={IEEE Transactions on Audio, Speech, and Language Processing}, author={Ion, Valentin and Haeb-Umbach, Reinhold}, year={2008}, pages={1047–1060} }","mla":"Ion, Valentin, and Reinhold Haeb-Umbach. “A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition.” <i>IEEE Transactions on Audio, Speech, and Language Processing</i>, vol. 16, no. 5, 2008, pp. 
1047–60, doi:<a href=\"https://doi.org/10.1109/TASL.2008.925879\">10.1109/TASL.2008.925879</a>."},"department":[{"_id":"54"}],"author":[{"last_name":"Ion","full_name":"Ion, Valentin","first_name":"Valentin"},{"full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold","last_name":"Haeb-Umbach","id":"242"}],"intvolume":"        16","_id":"11820","date_updated":"2022-01-06T06:51:10Z","date_created":"2019-07-12T05:28:53Z","status":"public","year":"2008","language":[{"iso":"eng"}],"user_id":"44006","oa":"1","main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2008/IoHa08-1.pdf"}],"keyword":["automatic speech recognition","bit errors","codecs","communication links","corrupted observations","decoding","distributed speech recognition","error-prone communication network","feature vector sequence","hidden Markov model-based ASR","hidden Markov models","inter-frame correlation","Internet telephony","network speech recognition","packet loss","speech posterior","speech recognition","transmission error robust speech recognition","uncertainty decoding","voice-over-IP codecs"],"title":"A Novel Uncertainty Decoding Rule With Applications to Transmission Error Robust Speech Recognition","doi":"10.1109/TASL.2008.925879","abstract":[{"lang":"eng","text":"In this paper, we derive an uncertainty decoding rule for automatic speech recognition (ASR), which accounts for both corrupted observations and inter-frame correlation. The conditional independence assumption, prevalent in hidden Markov model-based ASR, is relaxed to obtain a clean speech posterior that is conditioned on the complete observed feature vector sequence. This is a more informative posterior than one conditioned only on the current observation. The novel decoding is used to obtain a transmission-error robust remote ASR system, where the speech capturing unit is connected to the decoder via an error-prone communication network. 
We show how the clean speech posterior can be computed for communication links being characterized by either bit errors or packet loss. Recognition results are presented for both distributed and network speech recognition, where in the latter case common voice-over-IP codecs are employed."}],"volume":16,"page":"1047-1060","issue":"5","publication":"IEEE Transactions on Audio, Speech, and Language Processing","type":"journal_article"},{"date_updated":"2022-01-06T06:51:12Z","_id":"11935","page":"73-76","status":"public","language":[{"iso":"eng"}],"year":"2008","type":"conference","date_created":"2019-07-12T05:31:06Z","publication":"IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)","department":[{"_id":"54"}],"keyword":["adaptive interference canceller","adaptive signal processing","array signal processing","beamforming method","eigenvalues and eigenfunctions","generalized eigenvector blocking matrix","generalized sidelobe canceller","interference suppression","matrix algebra","noise suppression","speech enhancement","transfer function estimation","transfer functions"],"main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2008/WaKrHa08.pdf"}],"user_id":"44006","oa":"1","citation":{"chicago":"Warsitz, Ernst, Alexander Krueger, and Reinhold Haeb-Umbach. “Speech Enhancement with a New Generalized Eigenvector Blocking Matrix for Application in a Generalized Sidelobe Canceller.” In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 73–76, 2008. <a href=\"https://doi.org/10.1109/ICASSP.2008.4517549\">https://doi.org/10.1109/ICASSP.2008.4517549</a>.","ieee":"E. Warsitz, A. Krueger, and R. Haeb-Umbach, “Speech enhancement with a new generalized eigenvector blocking matrix for application in a generalized sidelobe canceller,” in <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 2008, pp. 
73–76.","ama":"Warsitz E, Krueger A, Haeb-Umbach R. Speech enhancement with a new generalized eigenvector blocking matrix for application in a generalized sidelobe canceller. In: <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>. ; 2008:73-76. doi:<a href=\"https://doi.org/10.1109/ICASSP.2008.4517549\">10.1109/ICASSP.2008.4517549</a>","apa":"Warsitz, E., Krueger, A., &#38; Haeb-Umbach, R. (2008). Speech enhancement with a new generalized eigenvector blocking matrix for application in a generalized sidelobe canceller. In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i> (pp. 73–76). <a href=\"https://doi.org/10.1109/ICASSP.2008.4517549\">https://doi.org/10.1109/ICASSP.2008.4517549</a>","short":"E. Warsitz, A. Krueger, R. Haeb-Umbach, in: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008), 2008, pp. 73–76.","mla":"Warsitz, Ernst, et al. “Speech Enhancement with a New Generalized Eigenvector Blocking Matrix for Application in a Generalized Sidelobe Canceller.” <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 2008, pp. 73–76, doi:<a href=\"https://doi.org/10.1109/ICASSP.2008.4517549\">10.1109/ICASSP.2008.4517549</a>.","bibtex":"@inproceedings{Warsitz_Krueger_Haeb-Umbach_2008, title={Speech enhancement with a new generalized eigenvector blocking matrix for application in a generalized sidelobe canceller}, DOI={<a href=\"https://doi.org/10.1109/ICASSP.2008.4517549\">10.1109/ICASSP.2008.4517549</a>}, booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)}, author={Warsitz, Ernst and Krueger, Alexander and Haeb-Umbach, Reinhold}, year={2008}, pages={73–76} }"},"abstract":[{"lang":"eng","text":"The generalized sidelobe canceller by Griffith and Jim is a robust beamforming method to enhance a desired (speech) signal in the presence of stationary noise. 
Its performance depends to a high degree on the construction of the blocking matrix which produces noise reference signals for the subsequent adaptive interference canceller. Especially in reverberated environments the beamformer may suffer from signal leakage and reduced noise suppression. In this paper a new blocking matrix is proposed. It is based on a generalized eigenvalue problem whose solution provides an indirect estimation of the transfer functions from the source to the sensors. The quality of the new generalized eigenvector blocking matrix is studied in simulated rooms with different reverberation times and is compared to alternatives proposed in the literature."}],"doi":"10.1109/ICASSP.2008.4517549","author":[{"full_name":"Warsitz, Ernst","first_name":"Ernst","last_name":"Warsitz"},{"full_name":"Krueger, Alexander","first_name":"Alexander","last_name":"Krueger"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"title":"Speech enhancement with a new generalized eigenvector blocking matrix for application in a generalized sidelobe canceller"},{"abstract":[{"text":"In this paper a switching linear dynamical model (SLDM) approach for speech feature enhancement is improved by employing more accurate models for the dynamics of speech and noise. The model of the clean speech feature trajectory is improved by augmenting the state vector to capture information derived from the delta features. Further a hidden noise state variable is introduced to obtain a more elaborated model for the noise dynamics. Approximate Bayesian inference in the SLDM is carried out by a bank of extended Kalman filters, whose outputs are combined according to the a posteriori probability of the individual state models. 
Experimental results on the AURORA2 database show improved recognition accuracy.","lang":"eng"}],"doi":"10.1109/ICASSP.2008.4518633","author":[{"first_name":"Stefan","full_name":"Windmann, Stefan","last_name":"Windmann"},{"id":"242","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"title":"Modeling the dynamics of speech and noise for speech feature enhancement in ASR","department":[{"_id":"54"}],"keyword":["a posteriori probability","AURORA2 database","Bayesian inference","Bayes methods","channel bank filters","extended Kalman filter banks","hidden noise state variable","Kalman filters","noise dynamics","speech enhancement","speech feature enhancement","speech feature trajectory","switching linear dynamical model approach"],"main_file_link":[{"open_access":"1","url":"https://groups.uni-paderborn.de/nt/pubs/2008/WiHa08-1.pdf"}],"user_id":"44006","oa":"1","citation":{"chicago":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Modeling the Dynamics of Speech and Noise for Speech Feature Enhancement in ASR.” In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 4409–12, 2008. <a href=\"https://doi.org/10.1109/ICASSP.2008.4518633\">https://doi.org/10.1109/ICASSP.2008.4518633</a>.","ieee":"S. Windmann and R. Haeb-Umbach, “Modeling the dynamics of speech and noise for speech feature enhancement in ASR,” in <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 2008, pp. 4409–4412.","ama":"Windmann S, Haeb-Umbach R. Modeling the dynamics of speech and noise for speech feature enhancement in ASR. In: <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>. ; 2008:4409-4412. doi:<a href=\"https://doi.org/10.1109/ICASSP.2008.4518633\">10.1109/ICASSP.2008.4518633</a>","apa":"Windmann, S., &#38; Haeb-Umbach, R. (2008). Modeling the dynamics of speech and noise for speech feature enhancement in ASR. 
In <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i> (pp. 4409–4412). <a href=\"https://doi.org/10.1109/ICASSP.2008.4518633\">https://doi.org/10.1109/ICASSP.2008.4518633</a>","short":"S. Windmann, R. Haeb-Umbach, in: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008), 2008, pp. 4409–4412.","mla":"Windmann, Stefan, and Reinhold Haeb-Umbach. “Modeling the Dynamics of Speech and Noise for Speech Feature Enhancement in ASR.” <i>IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)</i>, 2008, pp. 4409–12, doi:<a href=\"https://doi.org/10.1109/ICASSP.2008.4518633\">10.1109/ICASSP.2008.4518633</a>.","bibtex":"@inproceedings{Windmann_Haeb-Umbach_2008, title={Modeling the dynamics of speech and noise for speech feature enhancement in ASR}, DOI={<a href=\"https://doi.org/10.1109/ICASSP.2008.4518633\">10.1109/ICASSP.2008.4518633</a>}, booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)}, author={Windmann, Stefan and Haeb-Umbach, Reinhold}, year={2008}, pages={4409–4412} }"},"status":"public","language":[{"iso":"eng"}],"year":"2008","type":"conference","date_created":"2019-07-12T05:31:11Z","publication":"IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2008)","date_updated":"2022-01-06T06:51:12Z","page":"4409-4412","_id":"11939"},{"author":[{"first_name":"Manja","full_name":"Lohse, Manja","last_name":"Lohse"},{"last_name":"Rohlfing","id":"50352","full_name":"Rohlfing, Katharina","first_name":"Katharina"},{"last_name":"Wrede","full_name":"Wrede, Britta","first_name":"Britta"},{"last_name":"Sagerer","first_name":"Gerhard","full_name":"Sagerer, Gerhard"}],"title":"“Try something else!” — When users change their discursive behavior in human-robot interaction","abstract":[{"text":"This paper investigates the influence of feedback provided by an autonomous robot (BIRON) on users’ discursive 
behavior. A user study is described during which users show objects to the robot. The results of the experiment indicate that the robot’s verbal feedback utterances cause the humans to adapt their own way of speaking. The changes in users’ verbal behavior are due to their beliefs about the robot’s knowledge and abilities. In this paper they are identified and grouped. Moreover, the data implies variations in user behavior regarding gestures. Unlike speech, the robot was not able to give feedback with gestures. Due to the lack of feedback, users did not seem to have a consistent mental representation of the robot’s abilities to recognize gestures. As a result, changes between different gestures are interpreted to be unconscious variations accompanying speech.","lang":"eng"}],"doi":"10.1109/ROBOT.2008.4543743","keyword":["discursive behavior","autonomous robot","BIRON","man-machine systems","robot abilities","robot knowledge","user gestures","robot verbal feedback utterance","speech processing","user verbal behavior","service robots","human-robot interaction","human computer interaction","gesture recognition"],"user_id":"14931","citation":{"mla":"Lohse, Manja, et al. <i>“Try Something Else!” — When Users Change Their Discursive Behavior in Human-Robot Interaction</i>. 2008, pp. 3481–86, doi:<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>.","bibtex":"@inproceedings{Lohse_Rohlfing_Wrede_Sagerer_2008, title={“Try something else!” — When users change their discursive behavior in human-robot interaction}, DOI={<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>}, author={Lohse, Manja and Rohlfing, Katharina and Wrede, Britta and Sagerer, Gerhard}, year={2008}, pages={3481–3486} }","short":"M. Lohse, K. Rohlfing, B. Wrede, G. Sagerer, in: 2008, pp. 3481–3486.","ama":"Lohse M, Rohlfing K, Wrede B, Sagerer G. “Try something else!” — When users change their discursive behavior in human-robot interaction. 
In: ; 2008:3481-3486. doi:<a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>","apa":"Lohse, M., Rohlfing, K., Wrede, B., &#38; Sagerer, G. (2008). <i>“Try something else!” — When users change their discursive behavior in human-robot interaction</i>. 3481–3486. <a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">https://doi.org/10.1109/ROBOT.2008.4543743</a>","chicago":"Lohse, Manja, Katharina Rohlfing, Britta Wrede, and Gerhard Sagerer. “‘Try Something Else!’ — When Users Change Their Discursive Behavior in Human-Robot Interaction,” 3481–86, 2008. <a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">https://doi.org/10.1109/ROBOT.2008.4543743</a>.","ieee":"M. Lohse, K. Rohlfing, B. Wrede, and G. Sagerer, “‘Try something else!’ — When users change their discursive behavior in human-robot interaction,” 2008, pp. 3481–3486, doi: <a href=\"https://doi.org/10.1109/ROBOT.2008.4543743\">10.1109/ROBOT.2008.4543743</a>."},"department":[{"_id":"749"}],"date_created":"2020-06-24T13:02:49Z","status":"public","language":[{"iso":"eng"}],"type":"conference","publication_identifier":{"isbn":["1050-4729"]},"year":"2008","page":"3481-3486","_id":"17278","date_updated":"2023-02-01T13:08:20Z"}]
