[{"date_updated":"2023-11-22T13:44:59Z","oa":"1","author":[{"full_name":"Wiechmann, Jana","last_name":"Wiechmann","first_name":"Jana"},{"first_name":"Frederik","id":"72602","full_name":"Rautenberg, Frederik","last_name":"Rautenberg"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","id":"242","first_name":"Reinhold"}],"date_created":"2023-10-24T08:05:40Z","title":"Explaining voice characteristics to novice voice practitioners-How successful is it?","main_file_link":[{"open_access":"1"}],"conference":{"start_date":"2023-08-07","end_date":"2023-08-11"},"has_accepted_license":"1","year":"2023","citation":{"apa":"Wiechmann, J., Rautenberg, F., Wagner, P., &#38; Haeb-Umbach, R. (2023). Explaining voice characteristics to novice voice practitioners-How successful is it? <i>20th International Congress of the Phonetic Sciences (ICPhS) </i>.","bibtex":"@inproceedings{Wiechmann_Rautenberg_Wagner_Haeb-Umbach_2023, title={Explaining voice characteristics to novice voice practitioners-How successful is it?}, booktitle={20th International Congress of the Phonetic Sciences (ICPhS) }, author={Wiechmann, Jana and Rautenberg, Frederik and Wagner, Petra and Haeb-Umbach, Reinhold}, year={2023} }","mla":"Wiechmann, Jana, et al. “Explaining Voice Characteristics to Novice Voice Practitioners-How Successful Is It?” <i>20th International Congress of the Phonetic Sciences (ICPhS) </i>, 2023.","short":"J. Wiechmann, F. Rautenberg, P. Wagner, R. Haeb-Umbach, in: 20th International Congress of the Phonetic Sciences (ICPhS) , 2023.","chicago":"Wiechmann, Jana, Frederik Rautenberg, Petra Wagner, and Reinhold Haeb-Umbach. “Explaining Voice Characteristics to Novice Voice Practitioners-How Successful Is It?” In <i>20th International Congress of the Phonetic Sciences (ICPhS) </i>, 2023.","ieee":"J. Wiechmann, F. Rautenberg, P. Wagner, and R. 
Haeb-Umbach, “Explaining voice characteristics to novice voice practitioners-How successful is it?,” 2023.","ama":"Wiechmann J, Rautenberg F, Wagner P, Haeb-Umbach R. Explaining voice characteristics to novice voice practitioners-How successful is it? In: <i>20th International Congress of the Phonetic Sciences (ICPhS) </i>. ; 2023."},"project":[{"grant_number":"438445824","name":"TRR 318 - C06: TRR 318 - Technisch unterstütztes Erklären von Stimmcharakteristika (Teilprojekt C06)","_id":"129"}],"_id":"48410","user_id":"72602","department":[{"_id":"54"},{"_id":"660"}],"ddc":["040"],"language":[{"iso":"eng"}],"file_date_updated":"2023-10-24T08:03:27Z","type":"conference","publication":"20th International Congress of the Phonetic Sciences (ICPhS) ","file":[{"access_level":"closed","file_id":"48413","file_name":"188.pdf","file_size":209980,"date_created":"2023-10-24T08:03:27Z","creator":"frra","date_updated":"2023-10-24T08:03:27Z","relation":"main_file","success":1,"content_type":"application/pdf"}],"status":"public"},{"status":"public","publication":"12th Speech Synthesis Workshop (SSW) 2023","type":"conference","language":[{"iso":"eng"}],"department":[{"_id":"54"}],"user_id":"242","_id":"46069","project":[{"grant_number":"438445824","name":"TRR 318 - C06: TRR 318 - Technisch unterstütztes Erklären von Stimmcharakteristika (Teilprojekt C06)","_id":"129"}],"citation":{"ieee":"F. Seebauer, M. Kuhlmann, R. Haeb-Umbach, and P. Wagner, “Re-examining the quality dimensions of synthetic speech,” 2023.","chicago":"Seebauer, Fritz, Michael Kuhlmann, Reinhold Haeb-Umbach, and Petra Wagner. “Re-Examining the Quality Dimensions of Synthetic Speech.” In <i>12th Speech Synthesis Workshop (SSW) 2023</i>, 2023.","ama":"Seebauer F, Kuhlmann M, Haeb-Umbach R, Wagner P. Re-examining the quality dimensions of synthetic speech. In: <i>12th Speech Synthesis Workshop (SSW) 2023</i>. ; 2023.","apa":"Seebauer, F., Kuhlmann, M., Haeb-Umbach, R., &#38; Wagner, P. (2023). 
Re-examining the quality dimensions of synthetic speech. <i>12th Speech Synthesis Workshop (SSW) 2023</i>.","short":"F. Seebauer, M. Kuhlmann, R. Haeb-Umbach, P. Wagner, in: 12th Speech Synthesis Workshop (SSW) 2023, 2023.","bibtex":"@inproceedings{Seebauer_Kuhlmann_Haeb-Umbach_Wagner_2023, title={Re-examining the quality dimensions of synthetic speech}, booktitle={12th Speech Synthesis Workshop (SSW) 2023}, author={Seebauer, Fritz and Kuhlmann, Michael and Haeb-Umbach, Reinhold and Wagner, Petra}, year={2023} }","mla":"Seebauer, Fritz, et al. “Re-Examining the Quality Dimensions of Synthetic Speech.” <i>12th Speech Synthesis Workshop (SSW) 2023</i>, 2023."},"year":"2023","has_accepted_license":"1","title":"Re-examining the quality dimensions of synthetic speech","date_created":"2023-07-15T16:10:20Z","author":[{"first_name":"Fritz","last_name":"Seebauer","full_name":"Seebauer, Fritz"},{"first_name":"Michael","full_name":"Kuhlmann, Michael","id":"49871","last_name":"Kuhlmann"},{"last_name":"Haeb-Umbach","id":"242","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"},{"first_name":"Petra","full_name":"Wagner, Petra","last_name":"Wagner"}],"date_updated":"2023-10-25T08:42:56Z"},{"status":"public","type":"journal_article","file_date_updated":"2023-01-11T08:50:19Z","article_type":"original","user_id":"49870","department":[{"_id":"54"}],"project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"}],"_id":"35602","citation":{"apa":"von Neumann, T., Kinoshita, K., Boeddeker, C., Delcroix, M., &#38; Haeb-Umbach, R. (2023). Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria. <i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i>, <i>31</i>, 576–589. <a href=\"https://doi.org/10.1109/taslp.2022.3228629\">https://doi.org/10.1109/taslp.2022.3228629</a>","short":"T. von Neumann, K. Kinoshita, C. Boeddeker, M. Delcroix, R. 
Haeb-Umbach, IEEE/ACM Transactions on Audio, Speech, and Language Processing 31 (2023) 576–589.","mla":"von Neumann, Thilo, et al. “Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria.” <i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i>, vol. 31, Institute of Electrical and Electronics Engineers (IEEE), 2023, pp. 576–89, doi:<a href=\"https://doi.org/10.1109/taslp.2022.3228629\">10.1109/taslp.2022.3228629</a>.","bibtex":"@article{von Neumann_Kinoshita_Boeddeker_Delcroix_Haeb-Umbach_2023, title={Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria}, volume={31}, DOI={<a href=\"https://doi.org/10.1109/taslp.2022.3228629\">10.1109/taslp.2022.3228629</a>}, journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={von Neumann, Thilo and Kinoshita, Keisuke and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}, year={2023}, pages={576–589} }","ieee":"T. von Neumann, K. Kinoshita, C. Boeddeker, M. Delcroix, and R. Haeb-Umbach, “Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria,” <i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i>, vol. 31, pp. 576–589, 2023, doi: <a href=\"https://doi.org/10.1109/taslp.2022.3228629\">10.1109/taslp.2022.3228629</a>.","chicago":"Neumann, Thilo von, Keisuke Kinoshita, Christoph Boeddeker, Marc Delcroix, and Reinhold Haeb-Umbach. “Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria.” <i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i> 31 (2023): 576–89. <a href=\"https://doi.org/10.1109/taslp.2022.3228629\">https://doi.org/10.1109/taslp.2022.3228629</a>.","ama":"von Neumann T, Kinoshita K, Boeddeker C, Delcroix M, Haeb-Umbach R. Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria. 
<i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i>. 2023;31:576-589. doi:<a href=\"https://doi.org/10.1109/taslp.2022.3228629\">10.1109/taslp.2022.3228629</a>"},"intvolume":"        31","page":"576-589","publication_status":"published","has_accepted_license":"1","publication_identifier":{"issn":["2329-9290","2329-9304"]},"doi":"10.1109/taslp.2022.3228629","author":[{"first_name":"Thilo","full_name":"von Neumann, Thilo","id":"49870","orcid":"https://orcid.org/0000-0002-7717-8670","last_name":"von Neumann"},{"last_name":"Kinoshita","full_name":"Kinoshita, Keisuke","first_name":"Keisuke"},{"last_name":"Boeddeker","full_name":"Boeddeker, Christoph","id":"40767","first_name":"Christoph"},{"first_name":"Marc","full_name":"Delcroix, Marc","last_name":"Delcroix"},{"id":"242","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"volume":31,"date_updated":"2023-11-15T12:16:11Z","oa":"1","file":[{"date_created":"2023-01-09T17:46:05Z","creator":"haebumb","date_updated":"2023-01-11T08:50:19Z","access_level":"open_access","file_id":"35607","file_name":"main.pdf","file_size":7185077,"content_type":"application/pdf","relation":"main_file"}],"abstract":[{"lang":"eng","text":"Continuous Speech Separation (CSS) has been proposed to address speech overlaps during the analysis of realistic meeting-like conversations by eliminating any overlaps before further processing.\r\nCSS separates a recording of arbitrarily many speakers into a small number of overlap-free output channels, where each output channel may contain speech of multiple speakers.\r\nThis is often done by applying a conventional separation model trained with Utterance-level Permutation Invariant Training (uPIT), which exclusively maps a speaker to an output channel, in sliding window approach called stitching.\r\nRecently, we introduced an alternative training scheme called Graph-PIT that teaches the separation network to directly produce output streams in the 
required format without stitching.\r\nIt can handle an arbitrary number of speakers as long as never more of them overlap at the same time than the separator has output channels.\r\nIn this contribution, we further investigate the Graph-PIT training scheme.\r\nWe show in extended experiments that models trained with Graph-PIT also work in challenging reverberant conditions.\r\nModels trained in this way are able to perform segment-less CSS, i.e., without stitching, and achieve comparable and often better separation quality than the conventional CSS with uPIT and stitching.\r\nWe simplify the training schedule for Graph-PIT with the recently proposed Source Aggregated Signal-to-Distortion Ratio (SA-SDR) loss.\r\nIt eliminates unfavorable properties of the previously used A-SDR loss and thus enables training with Graph-PIT from scratch.\r\nGraph-PIT training relaxes the constraints w.r.t. the allowed numbers of speakers and speaking patterns which allows using a larger variety of training data.\r\nFurthermore, we introduce novel signal-level evaluation metrics for meeting scenarios, namely the source-aggregated scale- and convolution-invariant Signal-to-Distortion Ratio (SA-SI-SDR and SA-CI-SDR), which are generalizations of the commonly used SDR-based metrics for the CSS case."}],"publication":"IEEE/ACM Transactions on Audio, Speech, and Language Processing","language":[{"iso":"eng"}],"ddc":["000"],"keyword":["Continuous Speech Separation","Source Separation","Graph-PIT","Dynamic Programming","Permutation Invariant Training"],"year":"2023","quality_controlled":"1","title":"Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria","date_created":"2023-01-09T17:24:17Z","publisher":"Institute of Electrical and Electronics Engineers 
(IEEE)"},{"file":[{"file_id":"49110","access_level":"open_access","file_name":"asilomar.pdf","file_size":212317,"date_created":"2023-11-22T07:51:18Z","creator":"schmalen","date_updated":"2023-11-22T07:58:49Z","relation":"main_file","content_type":"application/pdf"}],"abstract":[{"text":"We propose a diarization system, that estimates “who spoke when” based on spatial information, to be used as a front-end of a meeting transcription system running on the signals gathered from an acoustic sensor network (ASN). Although the\r\nspatial distribution of the microphones is advantageous, exploiting the spatial diversity for diarization and signal enhancement is challenging, because the microphones’ positions are typically unknown, and the recorded signals are initially unsynchronized in general. Here, we approach these issues by first blindly synchronizing the signals and then estimating time differences of arrival (TDOAs). The TDOA information is exploited to estimate the speakers’ activity, even in the presence of multiple speakers being simultaneously active. This speaker activity information serves as a guide for a spatial mixture model, on which basis the individual speaker’s signals are extracted via beamforming. Finally, the extracted signals are forwarded to a speech recognizer. Additionally, a novel initialization scheme for spatial mixture models based on the TDOA estimates is proposed. Experiments conducted on real recordings from the LibriWASN data set have shown that our proposed system is advantageous compared to a system using a spatial mixture model, which does not make use\r\nof external diarization information.","lang":"eng"}],"publication":"Proc. 
Asilomar Conference on Signals, Systems, and Computers","language":[{"iso":"eng"}],"ddc":["004"],"keyword":["Diarization","time difference of arrival","ad-hoc acoustic sensor network","meeting transcription"],"year":"2023","quality_controlled":"1","title":"Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks","date_created":"2023-11-22T07:52:29Z","status":"public","type":"conference","file_date_updated":"2023-11-22T07:58:49Z","user_id":"460","department":[{"_id":"54"}],"_id":"49109","citation":{"apa":"Gburrek, T., Schmalenstroeer, J., &#38; Haeb-Umbach, R. (2023). Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks. <i>Proc. Asilomar Conference on Signals, Systems, and Computers</i>. 57th Asilomar Conference on Signals, Systems, and Computers.","bibtex":"@inproceedings{Gburrek_Schmalenstroeer_Haeb-Umbach_2023, title={Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks}, booktitle={Proc. Asilomar Conference on Signals, Systems, and Computers}, author={Gburrek, Tobias and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}, year={2023} }","mla":"Gburrek, Tobias, et al. “Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks.” <i>Proc. Asilomar Conference on Signals, Systems, and Computers</i>, 2023.","short":"T. Gburrek, J. Schmalenstroeer, R. Haeb-Umbach, in: Proc. Asilomar Conference on Signals, Systems, and Computers, 2023.","ama":"Gburrek T, Schmalenstroeer J, Haeb-Umbach R. Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks. In: <i>Proc. Asilomar Conference on Signals, Systems, and Computers</i>. ; 2023.","ieee":"T. Gburrek, J. Schmalenstroeer, and R. Haeb-Umbach, “Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks,” presented at the 57th Asilomar Conference on Signals, Systems, and Computers, 2023.","chicago":"Gburrek, Tobias, Joerg Schmalenstroeer, and Reinhold Haeb-Umbach. 
“Spatial Diarization for Meeting Transcription with Ad-Hoc Acoustic Sensor Networks.” In <i>Proc. Asilomar Conference on Signals, Systems, and Computers</i>, 2023."},"has_accepted_license":"1","conference":{"end_date":"2023-11-01","start_date":"2023-10-31","name":"57th Asilomar Conference on Signals, Systems, and Computers"},"author":[{"last_name":"Gburrek","full_name":"Gburrek, Tobias","id":"44006","first_name":"Tobias"},{"last_name":"Schmalenstroeer","full_name":"Schmalenstroeer, Joerg","id":"460","first_name":"Joerg"},{"first_name":"Reinhold","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","id":"242"}],"date_updated":"2023-11-22T07:58:49Z","oa":"1"},{"title":"Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics","main_file_link":[{"open_access":"1","url":"https://pub.dega-akustik.de/DAGA_2023/data/articles/000105.pdf"}],"conference":{"end_date":"2023-03-09","location":"Hamburg","name":"DAGA 2023 - 49. Jahrestagung für Akustik","start_date":"2023-03-06"},"oa":"1","date_updated":"2024-02-29T17:05:16Z","author":[{"first_name":"Frederik","last_name":"Rautenberg","full_name":"Rautenberg, Frederik","id":"72602"},{"first_name":"Michael","last_name":"Kuhlmann","id":"49871","full_name":"Kuhlmann, Michael"},{"first_name":"Janek","id":"34851","full_name":"Ebbers, Janek","last_name":"Ebbers"},{"first_name":"Jana","full_name":"Wiechmann, Jana","last_name":"Wiechmann"},{"first_name":"Fritz","full_name":"Seebauer, Fritz","last_name":"Seebauer"},{"first_name":"Petra","full_name":"Wagner, Petra","last_name":"Wagner"},{"id":"242","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"date_created":"2023-05-15T08:48:54Z","year":"2023","citation":{"bibtex":"@inproceedings{Rautenberg_Kuhlmann_Ebbers_Wiechmann_Seebauer_Wagner_Haeb-Umbach_2023, title={Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics}, booktitle={Fortschritte der 
Akustik - DAGA 2023}, author={Rautenberg, Frederik and Kuhlmann, Michael and Ebbers, Janek and Wiechmann, Jana and Seebauer, Fritz and Wagner, Petra and Haeb-Umbach, Reinhold}, year={2023}, pages={1409–1412} }","mla":"Rautenberg, Frederik, et al. “Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics.” <i>Fortschritte Der Akustik - DAGA 2023</i>, 2023, pp. 1409–12.","short":"F. Rautenberg, M. Kuhlmann, J. Ebbers, J. Wiechmann, F. Seebauer, P. Wagner, R. Haeb-Umbach, in: Fortschritte Der Akustik - DAGA 2023, 2023, pp. 1409–1412.","apa":"Rautenberg, F., Kuhlmann, M., Ebbers, J., Wiechmann, J., Seebauer, F., Wagner, P., &#38; Haeb-Umbach, R. (2023). Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics. <i>Fortschritte Der Akustik - DAGA 2023</i>, 1409–1412.","ieee":"F. Rautenberg <i>et al.</i>, “Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics,” in <i>Fortschritte der Akustik - DAGA 2023</i>, Hamburg, 2023, pp. 1409–1412.","chicago":"Rautenberg, Frederik, Michael Kuhlmann, Janek Ebbers, Jana Wiechmann, Fritz Seebauer, Petra Wagner, and Reinhold Haeb-Umbach. “Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics.” In <i>Fortschritte Der Akustik - DAGA 2023</i>, 1409–12, 2023.","ama":"Rautenberg F, Kuhlmann M, Ebbers J, et al. Speech Disentanglement for Analysis and Modification of Acoustic and Perceptual Speaker Characteristics. In: <i>Fortschritte Der Akustik - DAGA 2023</i>. 
; 2023:1409-1412."},"page":"1409-1412","publication_status":"published","has_accepted_license":"1","ddc":["000"],"language":[{"iso":"eng"}],"file_date_updated":"2024-02-29T16:15:12Z","project":[{"name":"TRR 318 - C06: TRR 318 - Technisch unterstütztes Erklären von Stimmcharakteristika (Teilprojekt C06)","_id":"129","grant_number":"438445824"}],"_id":"44849","user_id":"72602","department":[{"_id":"54"},{"_id":"660"}],"file":[{"relation":"main_file","content_type":"application/pdf","file_size":289493,"file_id":"52221","access_level":"open_access","file_name":"Daga_2023_Rautenberg_Paper.pdf","date_updated":"2024-02-29T16:15:12Z","date_created":"2024-02-29T16:15:12Z","creator":"frra"}],"status":"public","type":"conference","publication":"Fortschritte der Akustik - DAGA 2023"},{"publication":"Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)","type":"conference","abstract":[{"text":"Due to the high variation in the application requirements of sound event detection (SED) systems, it is not sufficient to evaluate systems only in a single operating mode. Therefore, the community recently adopted the polyphonic sound detection score (PSDS) as an evaluation metric, which is the normalized area under the PSD receiver operating characteristic (PSD-ROC). It summarizes the system performance over a range of operating modes resulting from varying the decision threshold that is used to translate the system output scores into a binary detection output. Hence, it provides a more complete picture of the overall system behavior and is less biased by specific threshold tuning. However, besides the decision threshold there is also the post-processing that can be changed to enter another operating mode. In this paper we propose the post-processing independent PSDS (piPSDS) as a generalization of the PSDS. 
Here, the post-processing independent PSD-ROC includes operating points from varying post-processings with varying decision thresholds. Thus, it summarizes even more operating modes of an SED system and allows for system comparison without the need of implementing a post-processing and without a bias due to different post-processings. While piPSDS can in principle combine different types of post-processing, we here, as a first step, present median filter independent PSDS (miPSDS) results for this year’s DCASE Challenge Task4a systems. Source code is publicly available in our sed_scores_eval package (https://github.com/fgnt/sed_scores_eval).","lang":"eng"}],"status":"public","file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2023-11-22T08:25:08Z","creator":"ebbers","date_created":"2023-11-22T08:25:08Z","file_size":221875,"file_name":"dcase2023_ebbers.pdf","file_id":"49112","access_level":"closed"}],"_id":"49111","project":[{"name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing","_id":"52"}],"department":[{"_id":"54"}],"user_id":"34851","ddc":["000"],"language":[{"iso":"eng"}],"file_date_updated":"2023-11-22T08:25:08Z","has_accepted_license":"1","quality_controlled":"1","place":"Tampere, Finland","year":"2023","page":"36–40","citation":{"ieee":"J. Ebbers, R. Haeb-Umbach, and R. Serizel, “Post-Processing Independent Evaluation of Sound Event Detection Systems,” in <i>Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)</i>, 2023, pp. 36–40.","chicago":"Ebbers, Janek, Reinhold Haeb-Umbach, and Romain Serizel. “Post-Processing Independent Evaluation of Sound Event Detection Systems.” In <i>Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)</i>, 36–40. Tampere, Finland, 2023.","ama":"Ebbers J, Haeb-Umbach R, Serizel R. 
Post-Processing Independent Evaluation of Sound Event Detection Systems. In: <i>Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)</i>. ; 2023:36–40.","apa":"Ebbers, J., Haeb-Umbach, R., &#38; Serizel, R. (2023). Post-Processing Independent Evaluation of Sound Event Detection Systems. <i>Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)</i>, 36–40.","mla":"Ebbers, Janek, et al. “Post-Processing Independent Evaluation of Sound Event Detection Systems.” <i>Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)</i>, 2023, pp. 36–40.","short":"J. Ebbers, R. Haeb-Umbach, R. Serizel, in: Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023), Tampere, Finland, 2023, pp. 36–40.","bibtex":"@inproceedings{Ebbers_Haeb-Umbach_Serizel_2023, place={Tampere, Finland}, title={Post-Processing Independent Evaluation of Sound Event Detection Systems}, booktitle={Proceedings of the 8th Detection and Classification of Acoustic Scenes and Events 2023 Workshop (DCASE2023)}, author={Ebbers, Janek and Haeb-Umbach, Reinhold and Serizel, Romain}, year={2023}, pages={36–40} }"},"date_updated":"2024-11-15T20:34:18Z","date_created":"2023-11-22T08:20:26Z","author":[{"first_name":"Janek","last_name":"Ebbers","id":"34851","full_name":"Ebbers, Janek"},{"first_name":"Reinhold","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","id":"242"},{"first_name":"Romain","last_name":"Serizel","full_name":"Serizel, Romain"}],"title":"Post-Processing Independent Evaluation of Sound Event Detection Systems"},{"user_id":"49871","department":[{"_id":"54"}],"_id":"57098","language":[{"iso":"eng"}],"type":"conference","publication":"Proceedings of the 20th International Congress of Phonetic Sciences","status":"public","author":[{"full_name":"Seebauer, 
Fritz","last_name":"Seebauer","first_name":"Fritz"},{"first_name":"Michael","full_name":"Kuhlmann, Michael","id":"49871","last_name":"Kuhlmann"},{"full_name":"Häb-Umbach, Reinhold","id":"242","last_name":"Häb-Umbach","first_name":"Reinhold"},{"last_name":"Wagner","full_name":"Wagner, Petra","first_name":"Petra"}],"date_created":"2024-11-15T06:49:27Z","date_updated":"2024-11-15T06:54:55Z","conference":{"location":"Prague","end_date":"2023-08-11","start_date":"2023-08-07","name":"International Congress of Phonetic Sciences (ICPhS)"},"title":"DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH","publication_identifier":{"isbn":["978-80-908114-2-3"]},"citation":{"ama":"Seebauer F, Kuhlmann M, Häb-Umbach R, Wagner P. DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH. In: <i>Proceedings of the 20th International Congress of Phonetic Sciences</i>. ; 2023.","chicago":"Seebauer, Fritz, Michael Kuhlmann, Reinhold Häb-Umbach, and Petra Wagner. “DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH.” In <i>Proceedings of the 20th International Congress of Phonetic Sciences</i>, 2023.","ieee":"F. Seebauer, M. Kuhlmann, R. Häb-Umbach, and P. Wagner, “DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH,” presented at the International Congress of Phonetic Sciences (ICPhS), Prague, 2023.","apa":"Seebauer, F., Kuhlmann, M., Häb-Umbach, R., &#38; Wagner, P. (2023). DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH. <i>Proceedings of the 20th International Congress of Phonetic Sciences</i>. International Congress of Phonetic Sciences (ICPhS), Prague.","short":"F. Seebauer, M. Kuhlmann, R. Häb-Umbach, P. Wagner, in: Proceedings of the 20th International Congress of Phonetic Sciences, 2023.","mla":"Seebauer, Fritz, et al. 
“DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH.” <i>Proceedings of the 20th International Congress of Phonetic Sciences</i>, 2023.","bibtex":"@inproceedings{Seebauer_Kuhlmann_Häb-Umbach_Wagner_2023, title={DISCERNING DIMENSIONS OF QUALITY FOR STATE OF THE ART SYNTHETIC SPEECH}, booktitle={Proceedings of the 20th International Congress of Phonetic Sciences}, author={Seebauer, Fritz and Kuhlmann, Michael and Häb-Umbach, Reinhold and Wagner, Petra}, year={2023} }"},"year":"2023"},{"status":"public","type":"conference","file_date_updated":"2023-10-19T07:41:56Z","user_id":"40767","department":[{"_id":"54"}],"project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"},{"_id":"508","name":"Automatische Transkription von Gesprächssituationen","grant_number":"448568305"}],"_id":"48281","citation":{"apa":"von Neumann, T., Boeddeker, C., Kinoshita, K., Delcroix, M., &#38; Haeb-Umbach, R. (2023). On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems. <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. <a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">https://doi.org/10.1109/icassp49357.2023.10094784</a>","short":"T. von Neumann, C. Boeddeker, K. Kinoshita, M. Delcroix, R. Haeb-Umbach, in: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE, 2023.","mla":"von Neumann, Thilo, et al. 
“On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems.” <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>, IEEE, 2023, doi:<a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">10.1109/icassp49357.2023.10094784</a>.","bibtex":"@inproceedings{von Neumann_Boeddeker_Kinoshita_Delcroix_Haeb-Umbach_2023, title={On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems}, DOI={<a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">10.1109/icassp49357.2023.10094784</a>}, booktitle={ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={von Neumann, Thilo and Boeddeker, Christoph and Kinoshita, Keisuke and Delcroix, Marc and Haeb-Umbach, Reinhold}, year={2023} }","ama":"von Neumann T, Boeddeker C, Kinoshita K, Delcroix M, Haeb-Umbach R. On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems. In: <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE; 2023. doi:<a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">10.1109/icassp49357.2023.10094784</a>","chicago":"Neumann, Thilo von, Christoph Boeddeker, Keisuke Kinoshita, Marc Delcroix, and Reinhold Haeb-Umbach. “On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems.” In <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE, 2023. <a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">https://doi.org/10.1109/icassp49357.2023.10094784</a>.","ieee":"T. von Neumann, C. Boeddeker, K. Kinoshita, M. Delcroix, and R. 
Haeb-Umbach, “On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems,” 2023, doi: <a href=\"https://doi.org/10.1109/icassp49357.2023.10094784\">10.1109/icassp49357.2023.10094784</a>."},"related_material":{"link":[{"relation":"software","url":"https://github.com/fgnt/meeteval"}]},"publication_status":"published","has_accepted_license":"1","main_file_link":[{"url":"https://ieeexplore.ieee.org/document/10094784"}],"doi":"10.1109/icassp49357.2023.10094784","author":[{"last_name":"von Neumann","orcid":"https://orcid.org/0000-0002-7717-8670","id":"49870","full_name":"von Neumann, Thilo","first_name":"Thilo"},{"last_name":"Boeddeker","id":"40767","full_name":"Boeddeker, Christoph","first_name":"Christoph"},{"last_name":"Kinoshita","full_name":"Kinoshita, Keisuke","first_name":"Keisuke"},{"full_name":"Delcroix, Marc","last_name":"Delcroix","first_name":"Marc"},{"last_name":"Haeb-Umbach","id":"242","full_name":"Haeb-Umbach, Reinhold","first_name":"Reinhold"}],"oa":"1","date_updated":"2025-02-12T09:16:34Z","file":[{"file_name":"ICASSP_2023_Meeting_Evaluation.pdf","access_level":"open_access","file_id":"48282","file_size":204994,"date_created":"2023-10-19T07:39:57Z","creator":"tvn","date_updated":"2023-10-19T07:41:56Z","relation":"main_file","content_type":"application/pdf"}],"abstract":[{"lang":"eng","text":"\tWe propose a general framework to compute the word error rate (WER) of ASR systems that process recordings containing multiple speakers at their input and that produce multiple output word sequences (MIMO).\r\n\tSuch ASR systems are typically required, e.g., for meeting transcription.\r\n\tWe provide an efficient implementation based on a dynamic programming search in a multi-dimensional Levenshtein distance tensor under the constraint that a reference utterance must be matched consistently with one hypothesis output. 
\r\n\tThis also results in an efficient implementation of the ORC WER which previously suffered from exponential complexity.\r\n\tWe give an overview of commonly used WER definitions for multi-speaker scenarios and show that they are specializations of the above MIMO WER tuned to particular application scenarios. \r\n\tWe conclude with a  discussion of the pros and cons of the various WER definitions and a recommendation when to use which."}],"publication":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","language":[{"iso":"eng"}],"ddc":["000"],"keyword":["Word Error Rate","Meeting Recognition","Levenshtein Distance"],"year":"2023","quality_controlled":"1","title":"On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems","date_created":"2023-10-19T07:38:31Z","publisher":"IEEE"},{"has_accepted_license":"1","related_material":{"link":[{"url":"https://github.com/fgnt/meeteval","relation":"software"}]},"citation":{"mla":"von Neumann, Thilo, et al. “MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems.” <i>Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments</i>, 2023.","bibtex":"@inproceedings{von Neumann_Boeddeker_Delcroix_Haeb-Umbach_2023, title={MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems}, booktitle={Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments}, author={von Neumann, Thilo and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}, year={2023} }","short":"T. von Neumann, C. Boeddeker, M. Delcroix, R. Haeb-Umbach, in: Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments, 2023.","apa":"von Neumann, T., Boeddeker, C., Delcroix, M., &#38; Haeb-Umbach, R. (2023). MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems. <i>Proc. 
CHiME 2023 Workshop on Speech Processing in Everyday Environments</i>. CHiME 2023 Workshop on Speech Processing in Everyday Environments, Dublin.","ama":"von Neumann T, Boeddeker C, Delcroix M, Haeb-Umbach R. MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems. In: <i>Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments</i>. ; 2023.","chicago":"Neumann, Thilo von, Christoph Boeddeker, Marc Delcroix, and Reinhold Haeb-Umbach. “MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems.” In <i>Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments</i>, 2023.","ieee":"T. von Neumann, C. Boeddeker, M. Delcroix, and R. Haeb-Umbach, “MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems,” presented at the CHiME 2023 Workshop on Speech Processing in Everyday Environments, Dublin, 2023."},"date_updated":"2025-02-12T09:12:05Z","oa":"1","author":[{"full_name":"von Neumann, Thilo","id":"49870","last_name":"von Neumann","orcid":"https://orcid.org/0000-0002-7717-8670","first_name":"Thilo"},{"last_name":"Boeddeker","full_name":"Boeddeker, Christoph","id":"40767","first_name":"Christoph"},{"last_name":"Delcroix","full_name":"Delcroix, Marc","first_name":"Marc"},{"id":"242","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"main_file_link":[{"url":"https://arxiv.org/abs/2307.11394","open_access":"1"}],"conference":{"name":"CHiME 2023 Workshop on Speech Processing in Everyday Environments","location":"Dublin"},"type":"conference","status":"public","project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"},{"grant_number":"448568305","_id":"508","name":"Automatische Transkription von 
Gesprächssituationen"}],"_id":"48275","user_id":"40767","department":[{"_id":"54"}],"file_date_updated":"2023-10-19T07:19:59Z","quality_controlled":"1","year":"2023","date_created":"2023-10-19T07:24:51Z","title":"MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems","publication":"Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments","abstract":[{"text":"MeetEval is an open-source toolkit to evaluate  all kinds of meeting transcription systems.\r\nIt provides a unified interface for the computation of commonly used Word Error Rates (WERs), specifically cpWER, ORC WER and MIMO WER along other WER definitions.\r\nWe extend the cpWER computation by a temporal constraint to ensure that only words are identified as correct when the temporal alignment is plausible.\r\nThis leads to a better quality of the matching of the hypothesis string to the reference string that more closely resembles the actual transcription quality, and a system is penalized if it provides poor time annotations.\r\nSince word-level timing information is often not available, we present a way to approximate exact word-level timings from segment-level timings (e.g., a sentence) and show that the approximation leads to a similar WER as a matching with exact word-level annotations.\r\nAt the same time, the time constraint leads to a speedup of the matching algorithm, which outweighs the additional overhead caused by processing the time stamps.","lang":"eng"}],"file":[{"access_level":"open_access","file_id":"48276","file_name":"Chime_7__MeetEval.pdf","file_size":263744,"date_created":"2023-10-19T07:19:59Z","creator":"tvn","date_updated":"2023-10-19T07:19:59Z","relation":"main_file","content_type":"application/pdf"}],"ddc":["000"],"keyword":["Speech Recognition","Word Error Rate","Meeting 
Transcription"],"language":[{"iso":"eng"}]},{"publication_status":"published","has_accepted_license":"1","citation":{"bibtex":"@inproceedings{Cord-Landwehr_Boeddeker_Zorilă_Doddipatla_Haeb-Umbach_2023, title={Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization}, DOI={<a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">10.1109/icassp49357.2023.10095370</a>}, booktitle={ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={Cord-Landwehr, Tobias and Boeddeker, Christoph and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold}, year={2023} }","mla":"Cord-Landwehr, Tobias, et al. “Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization.” <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>, IEEE, 2023, doi:<a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">10.1109/icassp49357.2023.10095370</a>.","short":"T. Cord-Landwehr, C. Boeddeker, C. Zorilă, R. Doddipatla, R. Haeb-Umbach, in: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE, 2023.","apa":"Cord-Landwehr, T., Boeddeker, C., Zorilă, C., Doddipatla, R., &#38; Haeb-Umbach, R. (2023). Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization. <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. 2023 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), Rhodes. <a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">https://doi.org/10.1109/icassp49357.2023.10095370</a>","ama":"Cord-Landwehr T, Boeddeker C, Zorilă C, Doddipatla R, Haeb-Umbach R. Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization. In: <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE; 2023. 
doi:<a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">10.1109/icassp49357.2023.10095370</a>","chicago":"Cord-Landwehr, Tobias, Christoph Boeddeker, Cătălin Zorilă, Rama Doddipatla, and Reinhold Haeb-Umbach. “Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization.” In <i>ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE, 2023. <a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">https://doi.org/10.1109/icassp49357.2023.10095370</a>.","ieee":"T. Cord-Landwehr, C. Boeddeker, C. Zorilă, R. Doddipatla, and R. Haeb-Umbach, “Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization,” presented at the 2023 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), Rhodes, 2023, doi: <a href=\"https://doi.org/10.1109/icassp49357.2023.10095370\">10.1109/icassp49357.2023.10095370</a>."},"oa":"1","date_updated":"2025-02-12T09:14:45Z","author":[{"first_name":"Tobias","last_name":"Cord-Landwehr","id":"44393","full_name":"Cord-Landwehr, Tobias"},{"first_name":"Christoph","last_name":"Boeddeker","id":"40767","full_name":"Boeddeker, Christoph"},{"first_name":"Cătălin","last_name":"Zorilă","full_name":"Zorilă, Cătălin"},{"last_name":"Doddipatla","full_name":"Doddipatla, Rama","first_name":"Rama"},{"first_name":"Reinhold","last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","id":"242"}],"conference":{"name":"2023 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)","location":"Rhodes"},"doi":"10.1109/icassp49357.2023.10095370","type":"conference","status":"public","project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"},{"grant_number":"448568305","_id":"508","name":"Automatische Transkription von 
Gesprächssituationen"}],"_id":"47128","user_id":"40767","department":[{"_id":"54"}],"file_date_updated":"2023-11-15T14:56:18Z","year":"2023","publisher":"IEEE","date_created":"2023-09-19T14:01:20Z","title":"Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization","publication":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","file":[{"content_type":"application/pdf","relation":"main_file","creator":"cord","date_created":"2023-11-15T14:56:18Z","date_updated":"2023-11-15T14:56:18Z","file_name":"teacher_student_embeddings.pdf","access_level":"open_access","file_id":"48932","file_size":246306}],"ddc":["000"],"language":[{"iso":"eng"}]},{"file":[{"file_name":"multispeaker_embeddings.pdf","access_level":"open_access","file_id":"48933","file_size":303203,"date_created":"2023-11-15T15:00:02Z","creator":"cord","date_updated":"2023-11-15T15:00:02Z","relation":"main_file","content_type":"application/pdf"}],"status":"public","type":"conference","publication":"INTERSPEECH 2023","ddc":["000"],"language":[{"iso":"eng"}],"file_date_updated":"2023-11-15T15:00:02Z","project":[{"name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing","_id":"52"},{"grant_number":"448568305","_id":"508","name":"Automatische Transkription von Gesprächssituationen"}],"_id":"47129","user_id":"40767","department":[{"_id":"54"}],"year":"2023","citation":{"mla":"Cord-Landwehr, Tobias, et al. “A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures.” <i>INTERSPEECH 2023</i>, ISCA, 2023, doi:<a href=\"https://doi.org/10.21437/interspeech.2023-1379\">10.21437/interspeech.2023-1379</a>.","short":"T. Cord-Landwehr, C. Boeddeker, C. Zorilă, R. Doddipatla, R. 
Haeb-Umbach, in: INTERSPEECH 2023, ISCA, 2023.","bibtex":"@inproceedings{Cord-Landwehr_Boeddeker_Zorilă_Doddipatla_Haeb-Umbach_2023, title={A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures}, DOI={<a href=\"https://doi.org/10.21437/interspeech.2023-1379\">10.21437/interspeech.2023-1379</a>}, booktitle={INTERSPEECH 2023}, publisher={ISCA}, author={Cord-Landwehr, Tobias and Boeddeker, Christoph and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold}, year={2023} }","apa":"Cord-Landwehr, T., Boeddeker, C., Zorilă, C., Doddipatla, R., &#38; Haeb-Umbach, R. (2023). A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures. <i>INTERSPEECH 2023</i>. <a href=\"https://doi.org/10.21437/interspeech.2023-1379\">https://doi.org/10.21437/interspeech.2023-1379</a>","ieee":"T. Cord-Landwehr, C. Boeddeker, C. Zorilă, R. Doddipatla, and R. Haeb-Umbach, “A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures,” 2023, doi: <a href=\"https://doi.org/10.21437/interspeech.2023-1379\">10.21437/interspeech.2023-1379</a>.","chicago":"Cord-Landwehr, Tobias, Christoph Boeddeker, Cătălin Zorilă, Rama Doddipatla, and Reinhold Haeb-Umbach. “A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures.” In <i>INTERSPEECH 2023</i>. ISCA, 2023. <a href=\"https://doi.org/10.21437/interspeech.2023-1379\">https://doi.org/10.21437/interspeech.2023-1379</a>.","ama":"Cord-Landwehr T, Boeddeker C, Zorilă C, Doddipatla R, Haeb-Umbach R. A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures. In: <i>INTERSPEECH 2023</i>. ISCA; 2023. 
doi:<a href=\"https://doi.org/10.21437/interspeech.2023-1379\">10.21437/interspeech.2023-1379</a>"},"publication_status":"published","has_accepted_license":"1","title":"A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures","doi":"10.21437/interspeech.2023-1379","publisher":"ISCA","oa":"1","date_updated":"2025-02-12T09:15:28Z","date_created":"2023-09-19T14:34:37Z","author":[{"first_name":"Tobias","last_name":"Cord-Landwehr","full_name":"Cord-Landwehr, Tobias","id":"44393"},{"id":"40767","full_name":"Boeddeker, Christoph","last_name":"Boeddeker","first_name":"Christoph"},{"last_name":"Zorilă","full_name":"Zorilă, Cătălin","first_name":"Cătălin"},{"last_name":"Doddipatla","full_name":"Doddipatla, Rama","first_name":"Rama"},{"full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold"}]},{"language":[{"iso":"eng"}],"department":[{"_id":"54"}],"user_id":"40767","_id":"54439","project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"},{"grant_number":"448568305","_id":"508","name":"Automatische Transkription von Gesprächssituationen"}],"status":"public","publication":"7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)","type":"conference","doi":"10.21437/chime.2023-10","main_file_link":[{"url":"https://www.isca-archive.org/chime_2023/boeddeker23_chime.pdf","open_access":"1"}],"title":"Multi-stage diarization refinement for the CHiME-7 DASR scenario","author":[{"last_name":"Boeddeker","id":"40767","full_name":"Boeddeker, Christoph","first_name":"Christoph"},{"first_name":"Tobias","full_name":"Cord-Landwehr, Tobias","id":"44393","last_name":"Cord-Landwehr"},{"id":"49870","full_name":"von Neumann, Thilo","last_name":"von Neumann","orcid":"https://orcid.org/0000-0002-7717-8670","first_name":"Thilo"},{"first_name":"Reinhold","id":"242","full_name":"Haeb-Umbach, 
Reinhold","last_name":"Haeb-Umbach"}],"date_created":"2024-05-23T15:16:15Z","date_updated":"2025-02-12T09:16:13Z","oa":"1","publisher":"ISCA","citation":{"ama":"Boeddeker C, Cord-Landwehr T, von Neumann T, Haeb-Umbach R. Multi-stage diarization refinement for the CHiME-7 DASR scenario. In: <i>7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)</i>. ISCA; 2023. doi:<a href=\"https://doi.org/10.21437/chime.2023-10\">10.21437/chime.2023-10</a>","chicago":"Boeddeker, Christoph, Tobias Cord-Landwehr, Thilo von Neumann, and Reinhold Haeb-Umbach. “Multi-Stage Diarization Refinement for the CHiME-7 DASR Scenario.” In <i>7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)</i>. ISCA, 2023. <a href=\"https://doi.org/10.21437/chime.2023-10\">https://doi.org/10.21437/chime.2023-10</a>.","ieee":"C. Boeddeker, T. Cord-Landwehr, T. von Neumann, and R. Haeb-Umbach, “Multi-stage diarization refinement for the CHiME-7 DASR scenario,” 2023, doi: <a href=\"https://doi.org/10.21437/chime.2023-10\">10.21437/chime.2023-10</a>.","short":"C. Boeddeker, T. Cord-Landwehr, T. von Neumann, R. Haeb-Umbach, in: 7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023), ISCA, 2023.","bibtex":"@inproceedings{Boeddeker_Cord-Landwehr_von Neumann_Haeb-Umbach_2023, title={Multi-stage diarization refinement for the CHiME-7 DASR scenario}, DOI={<a href=\"https://doi.org/10.21437/chime.2023-10\">10.21437/chime.2023-10</a>}, booktitle={7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)}, publisher={ISCA}, author={Boeddeker, Christoph and Cord-Landwehr, Tobias and von Neumann, Thilo and Haeb-Umbach, Reinhold}, year={2023} }","mla":"Boeddeker, Christoph, et al. 
“Multi-Stage Diarization Refinement for the CHiME-7 DASR Scenario.” <i>7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)</i>, ISCA, 2023, doi:<a href=\"https://doi.org/10.21437/chime.2023-10\">10.21437/chime.2023-10</a>.","apa":"Boeddeker, C., Cord-Landwehr, T., von Neumann, T., &#38; Haeb-Umbach, R. (2023). Multi-stage diarization refinement for the CHiME-7 DASR scenario. <i>7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)</i>. <a href=\"https://doi.org/10.21437/chime.2023-10\">https://doi.org/10.21437/chime.2023-10</a>"},"year":"2023","publication_status":"published"},{"main_file_link":[{"url":"https://www.isca-archive.org/interspeech_2023/berger23_interspeech.pdf","open_access":"1"}],"doi":"10.21437/interspeech.2023-1815","title":"Mixture Encoder for Joint Speech Separation and Recognition","date_created":"2023-10-23T15:06:39Z","author":[{"last_name":"Berger","full_name":"Berger, Simon","first_name":"Simon"},{"last_name":"Vieting","full_name":"Vieting, Peter","first_name":"Peter"},{"first_name":"Christoph","full_name":"Boeddeker, Christoph","id":"40767","last_name":"Boeddeker"},{"full_name":"Schlüter, Ralf","last_name":"Schlüter","first_name":"Ralf"},{"id":"242","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"date_updated":"2025-02-12T09:11:30Z","oa":"1","publisher":"ISCA","citation":{"ama":"Berger S, Vieting P, Boeddeker C, Schlüter R, Haeb-Umbach R. Mixture Encoder for Joint Speech Separation and Recognition. In: <i>INTERSPEECH 2023</i>. ISCA; 2023. doi:<a href=\"https://doi.org/10.21437/interspeech.2023-1815\">10.21437/interspeech.2023-1815</a>","ieee":"S. Berger, P. Vieting, C. Boeddeker, R. Schlüter, and R. 
Haeb-Umbach, “Mixture Encoder for Joint Speech Separation and Recognition,” 2023, doi: <a href=\"https://doi.org/10.21437/interspeech.2023-1815\">10.21437/interspeech.2023-1815</a>.","chicago":"Berger, Simon, Peter Vieting, Christoph Boeddeker, Ralf Schlüter, and Reinhold Haeb-Umbach. “Mixture Encoder for Joint Speech Separation and Recognition.” In <i>INTERSPEECH 2023</i>. ISCA, 2023. <a href=\"https://doi.org/10.21437/interspeech.2023-1815\">https://doi.org/10.21437/interspeech.2023-1815</a>.","mla":"Berger, Simon, et al. “Mixture Encoder for Joint Speech Separation and Recognition.” <i>INTERSPEECH 2023</i>, ISCA, 2023, doi:<a href=\"https://doi.org/10.21437/interspeech.2023-1815\">10.21437/interspeech.2023-1815</a>.","short":"S. Berger, P. Vieting, C. Boeddeker, R. Schlüter, R. Haeb-Umbach, in: INTERSPEECH 2023, ISCA, 2023.","bibtex":"@inproceedings{Berger_Vieting_Boeddeker_Schlüter_Haeb-Umbach_2023, title={Mixture Encoder for Joint Speech Separation and Recognition}, DOI={<a href=\"https://doi.org/10.21437/interspeech.2023-1815\">10.21437/interspeech.2023-1815</a>}, booktitle={INTERSPEECH 2023}, publisher={ISCA}, author={Berger, Simon and Vieting, Peter and Boeddeker, Christoph and Schlüter, Ralf and Haeb-Umbach, Reinhold}, year={2023} }","apa":"Berger, S., Vieting, P., Boeddeker, C., Schlüter, R., &#38; Haeb-Umbach, R. (2023). Mixture Encoder for Joint Speech Separation and Recognition. <i>INTERSPEECH 2023</i>. 
<a href=\"https://doi.org/10.21437/interspeech.2023-1815\">https://doi.org/10.21437/interspeech.2023-1815</a>"},"year":"2023","publication_status":"published","language":[{"iso":"eng"}],"user_id":"40767","department":[{"_id":"54"}],"project":[{"grant_number":"448568305","_id":"508","name":"Automatische Transkription von Gesprächssituationen"}],"_id":"48390","status":"public","type":"conference","publication":"INTERSPEECH 2023"},{"date_updated":"2026-01-05T10:12:23Z","date_created":"2024-11-14T09:45:03Z","author":[{"id":"49871","full_name":"Kuhlmann, Michael","last_name":"Kuhlmann","first_name":"Michael"},{"last_name":"Meise","full_name":"Meise, Adrian Tobias","id":"79268","first_name":"Adrian Tobias"},{"first_name":"Fritz","full_name":"Seebauer, Fritz","last_name":"Seebauer"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"last_name":"Häb-Umbach","id":"242","full_name":"Häb-Umbach, Reinhold","first_name":"Reinhold"}],"title":"Investigating Speaker Embedding Disentanglement on Natural Read Speech","year":"2023","page":"121–125","citation":{"apa":"Kuhlmann, M., Meise, A. T., Seebauer, F., Wagner, P., &#38; Häb-Umbach, R. (2023). Investigating Speaker Embedding Disentanglement on Natural Read Speech. <i>Speech Communication; 15th ITG Conference</i>, 121–125.","mla":"Kuhlmann, Michael, et al. “Investigating Speaker Embedding Disentanglement on Natural Read Speech.” <i>Speech Communication; 15th ITG Conference</i>, 2023, pp. 121–125.","short":"M. Kuhlmann, A.T. Meise, F. Seebauer, P. Wagner, R. Häb-Umbach, in: Speech Communication; 15th ITG Conference, 2023, pp. 
121–125.","bibtex":"@inproceedings{Kuhlmann_Meise_Seebauer_Wagner_Häb-Umbach_2023, title={Investigating Speaker Embedding Disentanglement on Natural Read Speech}, booktitle={Speech Communication; 15th ITG Conference}, author={Kuhlmann, Michael and Meise, Adrian Tobias and Seebauer, Fritz and Wagner, Petra and Häb-Umbach, Reinhold}, year={2023}, pages={121–125} }","ama":"Kuhlmann M, Meise AT, Seebauer F, Wagner P, Häb-Umbach R. Investigating Speaker Embedding Disentanglement on Natural Read Speech. In: <i>Speech Communication; 15th ITG Conference</i>. ; 2023:121–125.","ieee":"M. Kuhlmann, A. T. Meise, F. Seebauer, P. Wagner, and R. Häb-Umbach, “Investigating Speaker Embedding Disentanglement on Natural Read Speech,” in <i>Speech Communication; 15th ITG Conference</i>, 2023, pp. 121–125.","chicago":"Kuhlmann, Michael, Adrian Tobias Meise, Fritz Seebauer, Petra Wagner, and Reinhold Häb-Umbach. “Investigating Speaker Embedding Disentanglement on Natural Read Speech.” In <i>Speech Communication; 15th ITG Conference</i>, 121–125, 2023."},"_id":"57086","project":[{"name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing","_id":"52"}],"department":[{"_id":"54"}],"user_id":"49871","language":[{"iso":"eng"}],"publication":"Speech Communication; 15th ITG Conference","type":"conference","status":"public"},{"publication_status":"accepted","has_accepted_license":"1","quality_controlled":"1","citation":{"ama":"Heitkämper J, Schmalenstroeer J, Haeb-Umbach R. Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels. In: <i>Proceedings of the 30th European Signal Processing Conference (EUSIPCO)</i>.","ieee":"J. Heitkämper, J. Schmalenstroeer, and R. 
Haeb-Umbach, “Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels,” presented at the 30th European Signal Processing Conference (EUSIPCO), Belgrad.","chicago":"Heitkämper, Jens, Joerg Schmalenstroeer, and Reinhold Haeb-Umbach. “Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels.” In <i>Proceedings of the 30th European Signal Processing Conference (EUSIPCO)</i>. Belgrad, n.d.","bibtex":"@inproceedings{Heitkämper_Schmalenstroeer_Haeb-Umbach, place={Belgrad}, title={Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels}, booktitle={Proceedings of the 30th European Signal Processing Conference (EUSIPCO)}, author={Heitkämper, Jens and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold} }","short":"J. Heitkämper, J. Schmalenstroeer, R. Haeb-Umbach, in: Proceedings of the 30th European Signal Processing Conference (EUSIPCO), Belgrad, n.d.","mla":"Heitkämper, Jens, et al. “Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels.” <i>Proceedings of the 30th European Signal Processing Conference (EUSIPCO)</i>.","apa":"Heitkämper, J., Schmalenstroeer, J., &#38; Haeb-Umbach, R. (n.d.). Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels. <i>Proceedings of the 30th European Signal Processing Conference (EUSIPCO)</i>. 
30th European Signal Processing Conference (EUSIPCO), Belgrad."},"year":"2022","place":"Belgrad","author":[{"last_name":"Heitkämper","full_name":"Heitkämper, Jens","id":"27643","first_name":"Jens"},{"full_name":"Schmalenstroeer, Joerg","id":"460","last_name":"Schmalenstroeer","first_name":"Joerg"},{"last_name":"Haeb-Umbach","full_name":"Haeb-Umbach, Reinhold","id":"242","first_name":"Reinhold"}],"date_created":"2022-09-22T10:56:13Z","date_updated":"2023-10-26T08:15:57Z","conference":{"end_date":"2022-09-02","location":"Belgrad","name":"30th European Signal Processing Conference (EUSIPCO)","start_date":"2022-08-29"},"title":"Neural Network Based Carrier Frequency Offset Estimation From Speech Transmitted Over High Frequency Channels","type":"conference","publication":"Proceedings of the 30th European Signal Processing Conference (EUSIPCO)","file":[{"content_type":"application/pdf","success":1,"relation":"main_file","date_updated":"2022-09-22T10:48:31Z","date_created":"2022-09-22T10:48:31Z","creator":"jensheit","file_size":1231379,"access_level":"closed","file_name":"cfo.pdf","file_id":"33472"}],"status":"public","abstract":[{"lang":"eng","text":"The intelligibility of demodulated audio signals from analog high frequency transmissions, e.g., using single-sideband\r\n(SSB) modulation, can be severely degraded by channel distortions and/or a mismatch between modulation and demodulation carrier frequency. In this work a neural network (NN)-based approach for carrier frequency offset (CFO) estimation from demodulated SSB signals is proposed, whereby a task specific architecture is presented. Additionally, a simulation framework for SSB signals is introduced and utilized for training the NNs. The CFO estimator is combined with a speech enhancement network to investigate its influence on the enhancement performance. The NN-based system is compared to a recently proposed pitch tracking based approach on publicly available data from real high frequency transmissions. 
Experiments show that the NN exhibits good CFO estimation properties and results in significant improvements in speech intelligibility, especially when combined with a noise reduction network."}],"user_id":"460","department":[{"_id":"54"}],"project":[{"name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing","_id":"52"}],"_id":"33471","file_date_updated":"2022-09-22T10:48:31Z","language":[{"iso":"eng"}],"ddc":["000"]},{"has_accepted_license":"1","citation":{"short":"T. Cord-Landwehr, T. von Neumann, C. Boeddeker, R. Haeb-Umbach, in: 2022 International Workshop on Acoustic Signal Enhancement (IWAENC), 2022.","bibtex":"@inproceedings{Cord-Landwehr_von Neumann_Boeddeker_Haeb-Umbach_2022, title={MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator}, booktitle={2022 International Workshop on Acoustic Signal Enhancement (IWAENC)}, author={Cord-Landwehr, Tobias and von Neumann, Thilo and Boeddeker, Christoph and Haeb-Umbach, Reinhold}, year={2022} }","mla":"Cord-Landwehr, Tobias, et al. “MMS-MSG: A Multi-Purpose Multi-Speaker Mixture Signal Generator.” <i>2022 International Workshop on Acoustic Signal Enhancement (IWAENC)</i>, 2022.","apa":"Cord-Landwehr, T., von Neumann, T., Boeddeker, C., &#38; Haeb-Umbach, R. (2022). MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator. <i>2022 International Workshop on Acoustic Signal Enhancement (IWAENC)</i>. 2022 International Workshop on Acoustic Signal Enhancement (IWAENC), Bamberg.","chicago":"Cord-Landwehr, Tobias, Thilo von Neumann, Christoph Boeddeker, and Reinhold Haeb-Umbach. “MMS-MSG: A Multi-Purpose Multi-Speaker Mixture Signal Generator.” In <i>2022 International Workshop on Acoustic Signal Enhancement (IWAENC)</i>, 2022.","ieee":"T. Cord-Landwehr, T. von Neumann, C. Boeddeker, and R. 
Haeb-Umbach, “MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator,” presented at the 2022 International Workshop on Acoustic Signal Enhancement (IWAENC), Bamberg, 2022.","ama":"Cord-Landwehr T, von Neumann T, Boeddeker C, Haeb-Umbach R. MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator. In: <i>2022 International Workshop on Acoustic Signal Enhancement (IWAENC)</i>. ; 2022."},"author":[{"full_name":"Cord-Landwehr, Tobias","id":"44393","last_name":"Cord-Landwehr","first_name":"Tobias"},{"first_name":"Thilo","id":"49870","full_name":"von Neumann, Thilo","last_name":"von Neumann","orcid":"https://orcid.org/0000-0002-7717-8670"},{"first_name":"Christoph","full_name":"Boeddeker, Christoph","id":"40767","last_name":"Boeddeker"},{"id":"242","full_name":"Haeb-Umbach, Reinhold","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"oa":"1","date_updated":"2023-11-15T14:55:14Z","conference":{"name":"2022 International Workshop on Acoustic Signal Enhancement (IWAENC)","location":"Bamberg"},"type":"conference","status":"public","department":[{"_id":"54"}],"user_id":"44393","_id":"33847","project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"}],"file_date_updated":"2023-11-15T14:54:56Z","quality_controlled":"1","year":"2022","date_created":"2022-10-20T14:02:14Z","title":"MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator","publication":"2022 International Workshop on Acoustic Signal Enhancement (IWAENC)","file":[{"file_id":"48931","access_level":"open_access","file_name":"mms_msg_camera_ready.pdf","file_size":177975,"date_created":"2023-11-15T14:54:56Z","creator":"cord","date_updated":"2023-11-15T14:54:56Z","relation":"main_file","content_type":"application/pdf"}],"abstract":[{"lang":"eng","text":"The scope of speech enhancement has changed from a monolithic view of single,\r\nindependent tasks, to a joint processing of complex conversational speech\r\nrecordings. 
Training and evaluation of these single tasks requires synthetic\r\ndata with access to intermediate signals that is as close as possible to the\r\nevaluation scenario. As such data often is not available, many works instead\r\nuse specialized databases for the training of each system component, e.g\r\nWSJ0-mix for source separation. We present a Multi-purpose Multi-Speaker\r\nMixture Signal Generator (MMS-MSG) for generating a variety of speech mixture\r\nsignals based on any speech corpus, ranging from classical anechoic mixtures\r\n(e.g., WSJ0-mix) over reverberant mixtures (e.g., SMS-WSJ) to meeting-style\r\ndata. Its highly modular and flexible structure allows for the simulation of\r\ndiverse environments and dynamic mixing, while simultaneously enabling an easy\r\nextension and modification to generate new scenarios and mixture types. These\r\nmeetings can be used for prototyping, evaluation, or training purposes. We\r\nprovide example evaluation data and baseline results for meetings based on the\r\nWSJ corpus. 
Further, we demonstrate the usefulness for realistic scenarios by\r\nusing MMS-MSG to provide training data for the LibriCSS database."}],"external_id":{"arxiv":["2209.11494"]},"language":[{"iso":"eng"}],"ddc":["000"]},{"language":[{"iso":"eng"}],"ddc":["004"],"file":[{"date_updated":"2023-11-17T06:39:04Z","creator":"tgburrek","date_created":"2023-11-17T06:39:04Z","file_size":358015,"access_level":"open_access","file_name":"gburrek_icassp22.pdf","file_id":"48990","content_type":"application/pdf","relation":"main_file"}],"publication":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","title":"On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes","date_created":"2022-10-18T09:25:51Z","publisher":"IEEE","year":"2022","quality_controlled":"1","file_date_updated":"2023-11-17T06:39:04Z","department":[{"_id":"54"}],"user_id":"44006","_id":"33807","status":"public","type":"conference","doi":"10.1109/icassp43922.2022.9746284","author":[{"first_name":"Tobias","id":"44006","full_name":"Gburrek, Tobias","last_name":"Gburrek"},{"first_name":"Joerg","last_name":"Schmalenstroeer","full_name":"Schmalenstroeer, Joerg","id":"460"},{"full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"oa":"1","date_updated":"2023-11-17T06:39:28Z","citation":{"chicago":"Gburrek, Tobias, Joerg Schmalenstroeer, and Reinhold Haeb-Umbach. “On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes.” In <i>ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE, 2022. <a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">https://doi.org/10.1109/icassp43922.2022.9746284</a>.","ieee":"T. Gburrek, J. Schmalenstroeer, and R. Haeb-Umbach, “On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes,” 2022, doi: <a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">10.1109/icassp43922.2022.9746284</a>.","ama":"Gburrek T, Schmalenstroeer J, Haeb-Umbach R. On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes. In: <i>ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. IEEE; 2022. doi:<a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">10.1109/icassp43922.2022.9746284</a>","apa":"Gburrek, T., Schmalenstroeer, J., &#38; Haeb-Umbach, R. (2022). On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes. <i>ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>. <a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">https://doi.org/10.1109/icassp43922.2022.9746284</a>","short":"T. Gburrek, J. Schmalenstroeer, R. Haeb-Umbach, in: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE, 2022.","mla":"Gburrek, Tobias, et al. “On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes.” <i>ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</i>, IEEE, 2022, doi:<a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">10.1109/icassp43922.2022.9746284</a>.","bibtex":"@inproceedings{Gburrek_Schmalenstroeer_Haeb-Umbach_2022, title={On Synchronization of Wireless Acoustic Sensor Networks in the Presence of Time-Varying Sampling Rate Offsets and Speaker Changes}, DOI={<a href=\"https://doi.org/10.1109/icassp43922.2022.9746284\">10.1109/icassp43922.2022.9746284</a>}, booktitle={ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={Gburrek, Tobias and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}, year={2022} }"},"has_accepted_license":"1","publication_status":"published"},{"language":[{"iso":"eng"}],"ddc":["000"],"publication":"IEEE Transactions on Vehicular Technology","file":[{"content_type":"application/pdf","relation":"main_file","date_updated":"2022-09-22T07:00:29Z","creator":"huesera","date_created":"2022-09-22T07:00:29Z","file_size":12117870,"access_level":"open_access","file_name":"T-VT_AcceptedVersion.pdf","file_id":"33460"}],"abstract":[{"text":"We present an approach to automatically generate semantic labels for real recordings of automotive range-Doppler (RD) radar spectra. Such labels are required when training a neural network for object recognition from radar data. The automatic labeling approach rests on the simultaneous recording of camera and lidar data in addition to the radar spectrum. By warping radar spectra into the camera image, state-of-the-art object recognition algorithms can be applied to label relevant objects, such as cars, in the camera image. The warping operation is designed to be fully differentiable, which allows backpropagating the gradient computed on the camera image through the warping operation to the neural network operating on the radar data. As the warping operation relies on accurate scene flow estimation, we further propose a novel scene flow estimation algorithm which exploits information from camera, lidar and radar sensors. The\r\nproposed scene flow estimation approach is compared against a state-of-the-art scene flow algorithm, and it outperforms it by approximately 30% w.r.t. mean average error. The feasibility of the overall framework for automatic label generation for\r\nRD spectra is verified by evaluating the performance of neural networks trained with the proposed framework for Direction-of-Arrival estimation.","lang":"eng"}],"date_created":"2022-09-21T07:26:19Z","title":"Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications","issue":"9","quality_controlled":"1","year":"2022","user_id":"242","department":[{"_id":"54"}],"_id":"33451","file_date_updated":"2022-09-22T07:00:29Z","type":"journal_article","status":"public","author":[{"first_name":"Christopher","full_name":"Grimm, Christopher","last_name":"Grimm"},{"first_name":"Tai","full_name":"Fei, Tai","last_name":"Fei"},{"first_name":"Ernst","last_name":"Warsitz","full_name":"Warsitz, Ernst"},{"full_name":"Farhoud, Ridha","last_name":"Farhoud","first_name":"Ridha"},{"first_name":"Tobias","last_name":"Breddermann","full_name":"Breddermann, Tobias"},{"full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"volume":71,"date_updated":"2023-11-20T16:37:16Z","oa":"1","doi":"10.1109/TVT.2022.3182411","has_accepted_license":"1","citation":{"apa":"Grimm, C., Fei, T., Warsitz, E., Farhoud, R., Breddermann, T., &#38; Haeb-Umbach, R. (2022). Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications. <i>IEEE Transactions on Vehicular Technology</i>, <i>71</i>(9), 9435–9449. <a href=\"https://doi.org/10.1109/TVT.2022.3182411\">https://doi.org/10.1109/TVT.2022.3182411</a>","bibtex":"@article{Grimm_Fei_Warsitz_Farhoud_Breddermann_Haeb-Umbach_2022, title={Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications}, volume={71}, DOI={<a href=\"https://doi.org/10.1109/TVT.2022.3182411\">10.1109/TVT.2022.3182411</a>}, number={9}, journal={IEEE Transactions on Vehicular Technology}, author={Grimm, Christopher and Fei, Tai and Warsitz, Ernst and Farhoud, Ridha and Breddermann, Tobias and Haeb-Umbach, Reinhold}, year={2022}, pages={9435–9449} }","short":"C. Grimm, T. Fei, E. Warsitz, R. Farhoud, T. Breddermann, R. Haeb-Umbach, IEEE Transactions on Vehicular Technology 71 (2022) 9435–9449.","mla":"Grimm, Christopher, et al. “Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications.” <i>IEEE Transactions on Vehicular Technology</i>, vol. 71, no. 9, 2022, pp. 9435–49, doi:<a href=\"https://doi.org/10.1109/TVT.2022.3182411\">10.1109/TVT.2022.3182411</a>.","ieee":"C. Grimm, T. Fei, E. Warsitz, R. Farhoud, T. Breddermann, and R. Haeb-Umbach, “Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications,” <i>IEEE Transactions on Vehicular Technology</i>, vol. 71, no. 9, pp. 9435–9449, 2022, doi: <a href=\"https://doi.org/10.1109/TVT.2022.3182411\">10.1109/TVT.2022.3182411</a>.","chicago":"Grimm, Christopher, Tai Fei, Ernst Warsitz, Ridha Farhoud, Tobias Breddermann, and Reinhold Haeb-Umbach. “Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications.” <i>IEEE Transactions on Vehicular Technology</i> 71, no. 9 (2022): 9435–49. <a href=\"https://doi.org/10.1109/TVT.2022.3182411\">https://doi.org/10.1109/TVT.2022.3182411</a>.","ama":"Grimm C, Fei T, Warsitz E, Farhoud R, Breddermann T, Haeb-Umbach R. Warping of Radar Data Into Camera Image for Cross-Modal Supervision in Automotive Applications. <i>IEEE Transactions on Vehicular Technology</i>. 2022;71(9):9435-9449. doi:<a href=\"https://doi.org/10.1109/TVT.2022.3182411\">10.1109/TVT.2022.3182411</a>"},"intvolume":"        71","page":"9435-9449"},{"file":[{"creator":"huesera","date_created":"2022-10-12T07:09:26Z","date_updated":"2022-10-12T07:09:26Z","access_level":"open_access","file_id":"33697","file_name":"PP_2022_paper_8911.pdf","file_size":109294,"content_type":"application/pdf","relation":"main_file"}],"status":"public","type":"conference","publication":"18. Phonetik und Phonologie im deutschsprachigen Raum (P&P)","ddc":["000"],"language":[{"iso":"eng"}],"file_date_updated":"2022-10-12T07:09:26Z","project":[{"name":"TRR 318 - C06: TRR 318 - Technisch unterstütztes Erklären von Stimmcharakteristika (Teilprojekt C06)","_id":"129","grant_number":"438445824"}],"_id":"33696","user_id":"72602","department":[{"_id":"54"},{"_id":"660"}],"year":"2022","citation":{"short":"J. Wiechmann, T. Glarner, F. Rautenberg, P. Wagner, R. Haeb-Umbach, in: 18. Phonetik Und Phonologie Im Deutschsprachigen Raum (P&#38;P), 2022.","mla":"Wiechmann, Jana, et al. “Technically Enabled Explaining of Voice Characteristics.” <i>18. Phonetik Und Phonologie Im Deutschsprachigen Raum (P&#38;P)</i>, 2022.","bibtex":"@inproceedings{Wiechmann_Glarner_Rautenberg_Wagner_Haeb-Umbach_2022, title={Technically enabled explaining of voice characteristics}, booktitle={18. Phonetik und Phonologie im deutschsprachigen Raum (P&#38;P)}, author={Wiechmann, Jana and Glarner, Thomas and Rautenberg, Frederik and Wagner, Petra and Haeb-Umbach, Reinhold}, year={2022} }","apa":"Wiechmann, J., Glarner, T., Rautenberg, F., Wagner, P., &#38; Haeb-Umbach, R. (2022). Technically enabled explaining of voice characteristics. <i>18. Phonetik Und Phonologie Im Deutschsprachigen Raum (P&#38;P)</i>.","ieee":"J. Wiechmann, T. Glarner, F. Rautenberg, P. Wagner, and R. Haeb-Umbach, “Technically enabled explaining of voice characteristics,” Bielefeld, 2022.","chicago":"Wiechmann, Jana, Thomas Glarner, Frederik Rautenberg, Petra Wagner, and Reinhold Haeb-Umbach. “Technically Enabled Explaining of Voice Characteristics.” In <i>18. Phonetik Und Phonologie Im Deutschsprachigen Raum (P&#38;P)</i>, 2022.","ama":"Wiechmann J, Glarner T, Rautenberg F, Wagner P, Haeb-Umbach R. Technically enabled explaining of voice characteristics. In: <i>18. Phonetik Und Phonologie Im Deutschsprachigen Raum (P&#38;P)</i>. ; 2022."},"has_accepted_license":"1","title":"Technically enabled explaining of voice characteristics","conference":{"end_date":"2022-10-07","location":"Bielefeld","start_date":"2022-10-06"},"oa":"1","date_updated":"2023-11-22T13:45:30Z","date_created":"2022-10-12T07:10:03Z","author":[{"first_name":"Jana","full_name":"Wiechmann, Jana","last_name":"Wiechmann"},{"last_name":"Glarner","full_name":"Glarner, Thomas","first_name":"Thomas"},{"full_name":"Rautenberg, Frederik","id":"72602","last_name":"Rautenberg","first_name":"Frederik"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold"}]},{"title":"Investigation into Target Speaking Rate Adaptation for Voice Conversion","date_created":"2022-10-21T06:50:59Z","publisher":"ISCA","year":"2022","quality_controlled":"1","language":[{"iso":"eng"}],"ddc":["000"],"file":[{"success":1,"relation":"main_file","content_type":"application/pdf","file_size":303863,"access_level":"closed","file_name":"kuhlmann22_interspeech.pdf","file_id":"46070","date_updated":"2023-07-15T16:16:12Z","date_created":"2023-07-15T16:16:12Z","creator":"mikuhl"}],"publication":"Interspeech 2022","main_file_link":[{"open_access":"1","url":"https://www.isca-speech.org/archive/pdfs/interspeech_2022/kuhlmann22_interspeech.pdf"}],"doi":"10.21437/interspeech.2022-10740","author":[{"full_name":"Kuhlmann, Michael","id":"49871","last_name":"Kuhlmann","first_name":"Michael"},{"last_name":"Seebauer","full_name":"Seebauer, Fritz","first_name":"Fritz"},{"full_name":"Ebbers, Janek","id":"34851","last_name":"Ebbers","first_name":"Janek"},{"first_name":"Petra","last_name":"Wagner","full_name":"Wagner, Petra"},{"full_name":"Haeb-Umbach, Reinhold","id":"242","last_name":"Haeb-Umbach","first_name":"Reinhold"}],"date_updated":"2023-10-25T09:04:45Z","oa":"1","citation":{"bibtex":"@inproceedings{Kuhlmann_Seebauer_Ebbers_Wagner_Haeb-Umbach_2022, title={Investigation into Target Speaking Rate Adaptation for Voice Conversion}, DOI={<a href=\"https://doi.org/10.21437/interspeech.2022-10740\">10.21437/interspeech.2022-10740</a>}, booktitle={Interspeech 2022}, publisher={ISCA}, author={Kuhlmann, Michael and Seebauer, Fritz and Ebbers, Janek and Wagner, Petra and Haeb-Umbach, Reinhold}, year={2022} }","mla":"Kuhlmann, Michael, et al. “Investigation into Target Speaking Rate Adaptation for Voice Conversion.” <i>Interspeech 2022</i>, ISCA, 2022, doi:<a href=\"https://doi.org/10.21437/interspeech.2022-10740\">10.21437/interspeech.2022-10740</a>.","short":"M. Kuhlmann, F. Seebauer, J. Ebbers, P. Wagner, R. Haeb-Umbach, in: Interspeech 2022, ISCA, 2022.","apa":"Kuhlmann, M., Seebauer, F., Ebbers, J., Wagner, P., &#38; Haeb-Umbach, R. (2022). Investigation into Target Speaking Rate Adaptation for Voice Conversion. <i>Interspeech 2022</i>. <a href=\"https://doi.org/10.21437/interspeech.2022-10740\">https://doi.org/10.21437/interspeech.2022-10740</a>","chicago":"Kuhlmann, Michael, Fritz Seebauer, Janek Ebbers, Petra Wagner, and Reinhold Haeb-Umbach. “Investigation into Target Speaking Rate Adaptation for Voice Conversion.” In <i>Interspeech 2022</i>. ISCA, 2022. <a href=\"https://doi.org/10.21437/interspeech.2022-10740\">https://doi.org/10.21437/interspeech.2022-10740</a>.","ieee":"M. Kuhlmann, F. Seebauer, J. Ebbers, P. Wagner, and R. Haeb-Umbach, “Investigation into Target Speaking Rate Adaptation for Voice Conversion,” 2022, doi: <a href=\"https://doi.org/10.21437/interspeech.2022-10740\">10.21437/interspeech.2022-10740</a>.","ama":"Kuhlmann M, Seebauer F, Ebbers J, Wagner P, Haeb-Umbach R. Investigation into Target Speaking Rate Adaptation for Voice Conversion. In: <i>Interspeech 2022</i>. ISCA; 2022. doi:<a href=\"https://doi.org/10.21437/interspeech.2022-10740\">10.21437/interspeech.2022-10740</a>"},"publication_status":"published","has_accepted_license":"1","file_date_updated":"2023-07-15T16:16:12Z","user_id":"34851","department":[{"_id":"54"}],"project":[{"_id":"52","name":"PC2: Computing Resources Provided by the Paderborn Center for Parallel Computing"}],"_id":"33857","status":"public","type":"conference"}]
