@article{52958,
  author       = {Boeddeker, Christoph and Subramanian, Aswin Shanmugam and Wichern, Gordon and Haeb-Umbach, Reinhold and Le Roux, Jonathan},
  issn         = {2329-9290},
  journal      = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  keywords     = {Electrical and Electronic Engineering, Acoustics and Ultrasonics, Computer Science (miscellaneous), Computational Mathematics},
  pages        = {1185--1197},
  publisher    = {Institute of Electrical and Electronics Engineers ({IEEE})},
  title        = {{TS-SEP}: Joint Diarization and Separation Conditioned on Estimated Speaker Embeddings},
  doi          = {10.1109/taslp.2024.3350887},
  volume       = {32},
  year         = {2024},
}

@inproceedings{56004,
  author       = {von Neumann, Thilo and Boeddeker, Christoph and Cord-Landwehr, Tobias and Delcroix, Marc and Haeb-Umbach, Reinhold},
  booktitle    = {2024 {IEEE} International Conference on Acoustics, Speech, and Signal Processing Workshops ({ICASSPW})},
  publisher    = {IEEE},
  title        = {Meeting Recognition with Continuous Speech Separation and Transcription-Supported Diarization},
  doi          = {10.1109/icasspw62465.2024.10625894},
  year         = {2024},
}

@inproceedings{56272,
  author       = {Boeddeker, Christoph and Cord-Landwehr, Tobias and Haeb-Umbach, Reinhold},
  booktitle    = {Interspeech 2024},
  publisher    = {ISCA},
  title        = {Once More Diarization: Improving Meeting Transcription Systems through Segment-Level Speaker Reassignment},
  doi          = {10.21437/interspeech.2024-1286},
  year         = {2024},
}

@inproceedings{57659,
  author       = {Vieting, Peter and Berger, Simon and von Neumann, Thilo and Boeddeker, Christoph and Schlüter, Ralf and Haeb-Umbach, Reinhold},
  booktitle    = {2024 {IEEE} Spoken Language Technology Workshop ({SLT})},
  title        = {Combining {TF-GridNet} and Mixture Encoder for Continuous Speech Separation for Meeting Transcription},
  year         = {2024},
}

@inproceedings{57085,
  abstract     = {We propose an approach for simultaneous diarization and separation of meeting data. It consists of a complex Angular Central Gaussian Mixture Model (cACGMM) for speech source separation, and a von-Mises-Fisher Mixture Model (VMFMM) for diarization in a joint statistical framework. Through the integration, both spatial and spectral information are exploited for diarization and separation. We also develop a method for counting the number of active speakers in a segment of a meeting to support block-wise processing. While the total number of speakers in a meeting may be known, it is usually not known on a per-segment level. With the proposed speaker counting, joint diarization and source separation can be done segment-by-segment, and the permutation problem across segments is solved, thus allowing for block-online processing in the future. Experimental results on the LibriCSS meeting corpus show that the integrated approach outperforms a cascaded approach of diarization and speech enhancement in terms of WER, both on a per-segment and on a per-meeting level.},
  author       = {Cord-Landwehr, Tobias and Boeddeker, Christoph and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2025 - 2025 {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  keywords     = {diarization, source separation, mixture model, meeting},
  location     = {Hyderabad, India},
  title        = {Simultaneous Diarization and Separation of Meetings through the Integration of Statistical Mixture Models},
  doi          = {10.1109/ICASSP49660.2025.10888445},
  year         = {2025},
}

@inproceedings{53659,
  author       = {Cord-Landwehr, Tobias and Boeddeker, Christoph and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2024 - 2024 {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  location     = {Seoul},
  publisher    = {IEEE},
  title        = {Geodesic Interpolation of Frame-Wise Speaker Embeddings for the Diarization of Meeting Scenarios},
  doi          = {10.1109/icassp48485.2024.10445911},
  year         = {2024},
}

@inproceedings{48281,
  abstract     = {We propose a general framework to compute the word error rate (WER) of ASR systems that process recordings containing multiple speakers at their input and that produce multiple output word sequences (MIMO).
Such ASR systems are typically required, e.g., for meeting transcription.
We provide an efficient implementation based on a dynamic programming search in a multi-dimensional Levenshtein distance tensor under the constraint that a reference utterance must be matched consistently with one hypothesis output.
This also results in an efficient implementation of the ORC WER which previously suffered from exponential complexity.
We give an overview of commonly used WER definitions for multi-speaker scenarios and show that they are specializations of the above MIMO WER tuned to particular application scenarios.
We conclude with a discussion of the pros and cons of the various WER definitions and a recommendation when to use which.},
  author       = {von Neumann, Thilo and Boeddeker, Christoph and Kinoshita, Keisuke and Delcroix, Marc and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2023 - 2023 {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  keywords     = {Word Error Rate, Meeting Recognition, Levenshtein Distance},
  publisher    = {IEEE},
  title        = {On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems},
  doi          = {10.1109/icassp49357.2023.10094784},
  year         = {2023},
}

@inproceedings{48275,
  abstract     = {MeetEval is an open-source toolkit to evaluate all kinds of meeting transcription systems.
It provides a unified interface for the computation of commonly used Word Error Rates (WERs), specifically cpWER, ORC WER and MIMO WER along other WER definitions.
We extend the cpWER computation by a temporal constraint to ensure that only words are identified as correct when the temporal alignment is plausible.
This leads to a better quality of the matching of the hypothesis string to the reference string that more closely resembles the actual transcription quality, and a system is penalized if it provides poor time annotations.
Since word-level timing information is often not available, we present a way to approximate exact word-level timings from segment-level timings (e.g., a sentence) and show that the approximation leads to a similar WER as a matching with exact word-level annotations.
At the same time, the time constraint leads to a speedup of the matching algorithm, which outweighs the additional overhead caused by processing the time stamps.},
  author       = {von Neumann, Thilo and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold},
  booktitle    = {Proc. {CHiME} 2023 Workshop on Speech Processing in Everyday Environments},
  keywords     = {Speech Recognition, Word Error Rate, Meeting Transcription},
  location     = {Dublin},
  title        = {{MeetEval}: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems},
  year         = {2023},
}

@inproceedings{47128,
  author       = {Cord-Landwehr, Tobias and Boeddeker, Christoph and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2023 - 2023 {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  location     = {Rhodes},
  publisher    = {IEEE},
  title        = {Frame-Wise and Overlap-Robust Speaker Embeddings for Meeting Diarization},
  doi          = {10.1109/icassp49357.2023.10095370},
  year         = {2023},
}

@inproceedings{47129,
  author       = {Cord-Landwehr, Tobias and Boeddeker, Christoph and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold},
  booktitle    = {{INTERSPEECH} 2023},
  publisher    = {ISCA},
  title        = {A Teacher-Student Approach for Extracting Informative Speaker Embeddings From Speech Mixtures},
  doi          = {10.21437/interspeech.2023-1379},
  year         = {2023},
}

@inproceedings{54439,
  author       = {Boeddeker, Christoph and Cord-Landwehr, Tobias and von Neumann, Thilo and Haeb-Umbach, Reinhold},
  booktitle    = {7th International Workshop on Speech Processing in Everyday Environments ({CHiME} 2023)},
  publisher    = {ISCA},
  title        = {Multi-Stage Diarization Refinement for the {CHiME-7} {DASR} Scenario},
  doi          = {10.21437/chime.2023-10},
  year         = {2023},
}

@inproceedings{48390,
  author       = {Berger, Simon and Vieting, Peter and Boeddeker, Christoph and Schlüter, Ralf and Haeb-Umbach, Reinhold},
  booktitle    = {{INTERSPEECH} 2023},
  publisher    = {ISCA},
  title        = {Mixture Encoder for Joint Speech Separation and Recognition},
  doi          = {10.21437/interspeech.2023-1815},
  year         = {2023},
}

@inproceedings{33848,
  abstract     = {Impressive progress in neural network-based single-channel speech source
separation has been made in recent years. But those improvements have been
mostly reported on anechoic data, a situation that is hardly met in practice.
Taking the SepFormer as a starting point, which achieves state-of-the-art
performance on anechoic mixtures, we gradually modify it to optimize its
performance on reverberant mixtures. Although this leads to a word error rate
improvement by 7 percentage points compared to the standard SepFormer
implementation, the system ends up with only marginally better performance than
a PIT-BLSTM separation system, that is optimized with rather straightforward
means. This is surprising and at the same time sobering, challenging the
practical usefulness of many improvements reported in recent years for monaural
source separation on nonreverberant data.},
  author       = {Cord-Landwehr, Tobias and Boeddeker, Christoph and von Neumann, Thilo and Zorilă, Cătălin and Doddipatla, Rama and Haeb-Umbach, Reinhold},
  booktitle    = {2022 International Workshop on Acoustic Signal Enhancement ({IWAENC})},
  publisher    = {IEEE},
  title        = {Monaural Source Separation: From Anechoic to Reverberant Environments},
  year         = {2022},
}

@inproceedings{33819,
  author       = {von Neumann, Thilo and Kinoshita, Keisuke and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold},
  booktitle    = {{ICASSP} 2022 - 2022 {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  publisher    = {IEEE},
  title        = {{SA-SDR}: A Novel Loss Function for Separation of Meeting Style Data},
  doi          = {10.1109/icassp43922.2022.9746757},
  year         = {2022},
}

@misc{33816,
  author       = {Gburrek, Tobias and Boeddeker, Christoph and von Neumann, Thilo and Cord-Landwehr, Tobias and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold},
  publisher    = {arXiv},
  title        = {A Meeting Transcription System for an Ad-Hoc Acoustic Sensor Network},
  eprint       = {2205.00944},
  eprinttype   = {arXiv},
  doi          = {10.48550/ARXIV.2205.00944},
  year         = {2022},
}

@inproceedings{33954,
  author       = {Boeddeker, Christoph and Cord-Landwehr, Tobias and von Neumann, Thilo and Haeb-Umbach, Reinhold},
  booktitle    = {Interspeech 2022},
  publisher    = {ISCA},
  title        = {An Initialization Scheme for Meeting Separation with Spatial Mixture Models},
  doi          = {10.21437/interspeech.2022-10929},
  year         = {2022},
}

