@inproceedings{56004,
  author       = {{von Neumann, Thilo and Boeddeker, Christoph and Cord-Landwehr, Tobias and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{2024 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICASSPW)}},
  publisher    = {{IEEE}},
  title        = {{{Meeting Recognition with Continuous Speech Separation and Transcription-Supported Diarization}}},
  doi          = {{10.1109/icasspw62465.2024.10625894}},
  year         = {{2024}},
}

@inproceedings{57659,
  author       = {{Vieting, Peter and Berger, Simon and von Neumann, Thilo and Boeddeker, Christoph and Schlüter, Ralf and Haeb-Umbach, Reinhold}},
  booktitle    = {{2024 IEEE Spoken Language Technology Workshop (SLT)}},
  title        = {{{Combining TF-GridNet and Mixture Encoder for Continuous Speech Separation for Meeting Transcription}}},
  year         = {{2024}},
}

@article{35602,
  abstract     = {{Continuous Speech Separation (CSS) has been proposed to address speech overlaps during the analysis of realistic meeting-like conversations by eliminating any overlaps before further processing.
CSS separates a recording of arbitrarily many speakers into a small number of overlap-free output channels, where each output channel may contain speech of multiple speakers.
This is often done by applying a conventional separation model trained with Utterance-level Permutation Invariant Training (uPIT), which exclusively maps a speaker to an output channel, in a sliding-window approach called stitching.
Recently, we introduced an alternative training scheme called Graph-PIT that teaches the separation network to directly produce output streams in the required format without stitching.
It can handle an arbitrary number of speakers as long as no more speakers overlap at any point in time than the separator has output channels.
In this contribution, we further investigate the Graph-PIT training scheme.
We show in extended experiments that models trained with Graph-PIT also work in challenging reverberant conditions.
Models trained in this way are able to perform segment-less CSS, i.e., without stitching, and achieve separation quality comparable to, and often better than, conventional CSS with uPIT and stitching.
We simplify the training schedule for Graph-PIT with the recently proposed Source Aggregated Signal-to-Distortion Ratio (SA-SDR) loss.
It eliminates unfavorable properties of the previously used A-SDR loss and thus enables training with Graph-PIT from scratch.
Graph-PIT training relaxes the constraints w.r.t. the allowed numbers of speakers and speaking patterns, which allows using a larger variety of training data.
Furthermore, we introduce novel signal-level evaluation metrics for meeting scenarios, namely the source-aggregated scale- and convolution-invariant Signal-to-Distortion Ratio (SA-SI-SDR and SA-CI-SDR), which are generalizations of the commonly used SDR-based metrics for the CSS case.}},
  author       = {{von Neumann, Thilo and Kinoshita, Keisuke and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  issn         = {{2329-9290}},
  journal      = {{IEEE/ACM Transactions on Audio, Speech, and Language Processing}},
  keywords     = {{Continuous Speech Separation, Source Separation, Graph-PIT, Dynamic Programming, Permutation Invariant Training}},
  pages        = {{576--589}},
  publisher    = {{Institute of Electrical and Electronics Engineers (IEEE)}},
  title        = {{{Segment-Less Continuous Speech Separation of Meetings: Training and Evaluation Criteria}}},
  doi          = {{10.1109/taslp.2022.3228629}},
  volume       = {{31}},
  year         = {{2023}},
}

@inproceedings{48281,
  abstract     = {{We propose a general framework to compute the word error rate (WER) of ASR systems that process recordings containing multiple speakers at their input and that produce multiple output word sequences (MIMO).
Such ASR systems are typically required, e.g., for meeting transcription.
We provide an efficient implementation based on a dynamic programming search in a multi-dimensional Levenshtein distance tensor under the constraint that a reference utterance must be matched consistently with one hypothesis output.
This also results in an efficient implementation of the ORC WER, which previously suffered from exponential complexity.
We give an overview of commonly used WER definitions for multi-speaker scenarios and show that they are specializations of the above MIMO WER tuned to particular application scenarios.
We conclude with a discussion of the pros and cons of the various WER definitions and a recommendation when to use which.}},
  author       = {{von Neumann, Thilo and Boeddeker, Christoph and Kinoshita, Keisuke and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
  keywords     = {{Word Error Rate, Meeting Recognition, Levenshtein Distance}},
  publisher    = {{IEEE}},
  title        = {{{On Word Error Rate Definitions and Their Efficient Computation for Multi-Speaker Speech Recognition Systems}}},
  doi          = {{10.1109/icassp49357.2023.10094784}},
  year         = {{2023}},
}

@inproceedings{48275,
  abstract     = {{MeetEval is an open-source toolkit to evaluate all kinds of meeting transcription systems.
It provides a unified interface for the computation of commonly used Word Error Rates (WERs), specifically cpWER, ORC WER, and MIMO WER, alongside other WER definitions.
We extend the cpWER computation by a temporal constraint to ensure that words are only identified as correct when the temporal alignment is plausible.
This leads to a matching of the hypothesis string to the reference string that more closely resembles the actual transcription quality, and a system is penalized if it provides poor time annotations.
Since word-level timing information is often not available, we present a way to approximate exact word-level timings from segment-level timings (e.g., of a sentence) and show that the approximation leads to a WER similar to that of a matching with exact word-level annotations.
At the same time, the time constraint leads to a speedup of the matching algorithm, which outweighs the additional overhead caused by processing the time stamps.}},
  author       = {{von Neumann, Thilo and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{Proc. CHiME 2023 Workshop on Speech Processing in Everyday Environments}},
  keywords     = {{Speech Recognition, Word Error Rate, Meeting Transcription}},
  location     = {{Dublin}},
  title        = {{{MeetEval: A Toolkit for Computation of Word Error Rates for Meeting Transcription Systems}}},
  year         = {{2023}},
}

@inproceedings{54439,
  author       = {{Boeddeker, Christoph and Cord-Landwehr, Tobias and von Neumann, Thilo and Haeb-Umbach, Reinhold}},
  booktitle    = {{7th International Workshop on Speech Processing in Everyday Environments (CHiME 2023)}},
  publisher    = {{ISCA}},
  title        = {{{Multi-stage diarization refinement for the CHiME-7 DASR scenario}}},
  doi          = {{10.21437/chime.2023-10}},
  year         = {{2023}},
}

@inproceedings{33847,
  abstract     = {{The scope of speech enhancement has changed from a monolithic view of single,
independent tasks, to a joint processing of complex conversational speech
recordings. Training and evaluation of these single tasks requires synthetic
data with access to intermediate signals that is as close as possible to the
evaluation scenario. As such data is often not available, many works instead
use specialized databases for the training of each system component, e.g.,
WSJ0-mix for source separation. We present a Multi-purpose Multi-Speaker
Mixture Signal Generator (MMS-MSG) for generating a variety of speech mixture
signals based on any speech corpus, ranging from classical anechoic mixtures
(e.g., WSJ0-mix) through reverberant mixtures (e.g., SMS-WSJ) to meeting-style
data. Its highly modular and flexible structure allows for the simulation of
diverse environments and dynamic mixing, while simultaneously enabling an easy
extension and modification to generate new scenarios and mixture types. These
meetings can be used for prototyping, evaluation, or training purposes. We
provide example evaluation data and baseline results for meetings based on the
WSJ corpus. Further, we demonstrate the usefulness for realistic scenarios by
using MMS-MSG to provide training data for the LibriCSS database.}},
  author       = {{Cord-Landwehr, Tobias and von Neumann, Thilo and Boeddeker, Christoph and Haeb-Umbach, Reinhold}},
  booktitle    = {{2022 International Workshop on Acoustic Signal Enhancement (IWAENC)}},
  location     = {{Bamberg}},
  title        = {{{MMS-MSG: A Multi-purpose Multi-Speaker Mixture Signal Generator}}},
  year         = {{2022}},
}

@inproceedings{33848,
  abstract     = {{Impressive progress in neural network-based single-channel speech source
separation has been made in recent years. But those improvements have been
mostly reported on anechoic data, a situation that is hardly met in practice.
Taking the SepFormer as a starting point, which achieves state-of-the-art
performance on anechoic mixtures, we gradually modify it to optimize its
performance on reverberant mixtures. Although this leads to a word error rate
improvement by 7 percentage points compared to the standard SepFormer
implementation, the system ends up with only marginally better performance than
a PIT-BLSTM separation system that is optimized with rather straightforward
means. This is surprising and at the same time sobering, challenging the
practical usefulness of many improvements reported in recent years for monaural
source separation on nonreverberant data.}},
  author       = {{Cord-Landwehr, Tobias and Boeddeker, Christoph and von Neumann, Thilo and Zorila, Catalin and Doddipatla, Rama and Haeb-Umbach, Reinhold}},
  booktitle    = {{2022 International Workshop on Acoustic Signal Enhancement (IWAENC)}},
  publisher    = {{IEEE}},
  title        = {{{Monaural source separation: From anechoic to reverberant environments}}},
  year         = {{2022}},
}

@inproceedings{33819,
  author       = {{von Neumann, Thilo and Kinoshita, Keisuke and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
  publisher    = {{IEEE}},
  title        = {{{SA-SDR: A Novel Loss Function for Separation of Meeting Style Data}}},
  doi          = {{10.1109/icassp43922.2022.9746757}},
  year         = {{2022}},
}

@misc{33816,
  author       = {{Gburrek, Tobias and Boeddeker, Christoph and von Neumann, Thilo and Cord-Landwehr, Tobias and Schmalenstroeer, Joerg and Haeb-Umbach, Reinhold}},
  publisher    = {{arXiv}},
  title        = {{{A Meeting Transcription System for an Ad-Hoc Acoustic Sensor Network}}},
  doi          = {{10.48550/ARXIV.2205.00944}},
  year         = {{2022}},
}

@inproceedings{33954,
  author       = {{Boeddeker, Christoph and Cord-Landwehr, Tobias and von Neumann, Thilo and Haeb-Umbach, Reinhold}},
  booktitle    = {{Interspeech 2022}},
  publisher    = {{ISCA}},
  title        = {{{An Initialization Scheme for Meeting Separation with Spatial Mixture Models}}},
  doi          = {{10.21437/interspeech.2022-10929}},
  year         = {{2022}},
}

@inproceedings{33958,
  abstract     = {{Recent speaker diarization studies showed that integration of end-to-end neural diarization (EEND) and clustering-based diarization is a promising approach for achieving state-of-the-art performance on various tasks. Such an approach first divides an observed signal into fixed-length segments, then performs {\it segment-level} local diarization based on an EEND module, and merges the segment-level results via clustering to form a final global diarization result. The segmentation is done to limit the number of speakers in each segment since the current EEND cannot handle a large number of speakers. In this paper, we argue that such an approach involving the segmentation has several issues; for example, it inevitably faces a dilemma that larger segment sizes increase both the context available for enhancing the performance and the number of speakers for the local EEND module to handle. To resolve such a problem, this paper proposes a novel framework that performs diarization without segmentation. However, it can still handle challenging data containing many speakers and a significant amount of overlapping speech. The proposed method can take an entire meeting for inference and perform {\it utterance-by-utterance} diarization that clusters utterance activities in terms of speakers. To this end, we leverage a neural network training scheme called Graph-PIT proposed recently for neural source separation. Experiments with simulated active-meeting-like data and CALLHOME data show the superiority of the proposed approach over the conventional methods.}},
  author       = {{Kinoshita, Keisuke and von Neumann, Thilo and Delcroix, Marc and Boeddeker, Christoph and Haeb-Umbach, Reinhold}},
  booktitle    = {{Proc. Interspeech 2022}},
  pages        = {{1486--1490}},
  publisher    = {{ISCA}},
  title        = {{{Utterance-by-utterance overlap-aware neural diarization with Graph-PIT}}},
  doi          = {{10.21437/Interspeech.2022-11408}},
  year         = {{2022}},
}

@inproceedings{26770,
  abstract     = {{Automatic transcription of meetings requires handling of overlapped speech, which calls for continuous speech separation (CSS) systems. The uPIT criterion was proposed for utterance-level separation with neural networks and introduces the constraint that the total number of speakers must not exceed the number of output channels. When processing meeting-like data in a segment-wise manner, i.e., by separating overlapping segments independently and stitching adjacent segments into continuous output streams, this constraint has to be fulfilled for any segment. In this contribution, we show that this constraint can be significantly relaxed. We propose a novel graph-based PIT criterion, which casts the assignment of utterances to output channels as a graph coloring problem. It only requires that the number of concurrently active speakers must not exceed the number of output channels. As a consequence, the system can process an arbitrary number of speakers and arbitrarily long segments and thus can handle more diverse scenarios.
Further, the stitching algorithm for obtaining a consistent output order in neighboring segments is of less importance and can even be eliminated completely, not least reducing the computational effort. Experiments on meeting-style WSJ data show improvements in recognition performance over using the uPIT criterion.}},
  author       = {{von Neumann, Thilo and Kinoshita, Keisuke and Boeddeker, Christoph and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{Interspeech 2021}},
  keywords     = {{Continuous speech separation, automatic speech recognition, overlapped speech, permutation invariant training}},
  title        = {{{Graph-PIT: Generalized Permutation Invariant Training for Continuous Separation of Arbitrary Numbers of Speakers}}},
  doi          = {{10.21437/interspeech.2021-1177}},
  year         = {{2021}},
}

@inproceedings{29173,
  author       = {{von Neumann, Thilo and Boeddeker, Christoph and Kinoshita, Keisuke and Delcroix, Marc and Haeb-Umbach, Reinhold}},
  booktitle    = {{Speech Communication; 14th ITG Conference}},
  location     = {{Kiel}},
  title        = {{{Speeding Up Permutation Invariant Training for Source Separation}}},
  year         = {{2021}},
}

@inproceedings{20762,
  abstract     = {{The rising interest in single-channel multi-speaker speech separation sparked development of End-to-End (E2E) approaches to multi-speaker speech recognition. However, up until now, state-of-the-art neural network–based time domain source separation has not yet been combined with E2E speech recognition. We here demonstrate how to combine a separation module based on a Convolutional Time domain Audio Separation Network (Conv-TasNet) with an E2E speech recognizer and how to train such a model jointly by distributing it over multiple GPUs or by approximating truncated back-propagation for the convolutional front-end. To put this work into perspective and illustrate the complexity of the design space, we provide a compact overview of single-channel multi-speaker recognition systems. Our experiments show a word error rate of 11.0% on WSJ0-2mix and indicate that our joint time domain model can yield substantial improvements over cascade DNN-HMM and monolithic E2E frequency domain systems proposed so far.}},
  author       = {{von Neumann, Thilo and Kinoshita, Keisuke and Drude, Lukas and Boeddeker, Christoph and Delcroix, Marc and Nakatani, Tomohiro and Haeb-Umbach, Reinhold}},
  booktitle    = {{ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}},
  pages        = {{7004--7008}},
  title        = {{{End-to-End Training of Time Domain Audio Separation and Recognition}}},
  doi          = {{10.1109/ICASSP40776.2020.9053461}},
  year         = {{2020}},
}

@inproceedings{20764,
  abstract     = {{Most approaches to multi-talker overlapped speech separation and recognition assume that the number of simultaneously active speakers is given, but in realistic situations, it is typically unknown. To cope with this, we extend an iterative speech extraction system with mechanisms to count the number of sources and combine it with a single-talker speech recognizer to form the first end-to-end multi-talker automatic speech recognition system for an unknown number of active speakers. Our experiments show very promising performance in counting accuracy, source separation and speech recognition on simulated clean mixtures from WSJ0-2mix and WSJ0-3mix. Among others, we set a new state-of-the-art word error rate on the WSJ0-2mix database. Furthermore, our system generalizes well to a larger number of speakers than it ever saw during training, as shown in experiments with the WSJ0-4mix database. }},
  author       = {{von Neumann, Thilo and Boeddeker, Christoph and Drude, Lukas and Kinoshita, Keisuke and Delcroix, Marc and Nakatani, Tomohiro and Haeb-Umbach, Reinhold}},
  booktitle    = {{Proc. Interspeech 2020}},
  pages        = {{3097--3101}},
  title        = {{{Multi-Talker ASR for an Unknown Number of Sources: Joint Training of Source Counting, Separation and ASR}}},
  doi          = {{10.21437/Interspeech.2020-2519}},
  year         = {{2020}},
}

@inproceedings{20766,
  abstract     = {{Recently, source separation performance was greatly improved by time-domain audio source separation based on the dual-path recurrent neural network (DPRNN). DPRNN is a simple but effective model for long sequential data. While DPRNN is quite efficient in modeling sequential data of the length of an utterance, i.e., about 5 to 10 seconds, it is harder to apply it to longer sequences such as whole conversations consisting of multiple utterances. This is simply because, in such a case, the number of time steps consumed by its internal module called inter-chunk RNN becomes extremely large. To mitigate this problem, this paper proposes a multi-path RNN (MPRNN), a generalized version of DPRNN, which models the input data in a hierarchical manner. In the MPRNN framework, the input data is represented at several ($\geq$ 3) time resolutions, each of which is modeled by a specific RNN sub-module. For example, the RNN sub-module that deals with the finest resolution may model temporal relationships only within a phoneme, while the RNN sub-module handling the coarsest resolution may capture only the relationship between utterances, such as speaker information. We perform experiments using simulated dialogue-like mixtures and show that MPRNN has greater model capacity and outperforms the current state-of-the-art DPRNN framework, especially in online processing scenarios.}},
  author       = {{Kinoshita, Keisuke and von Neumann, Thilo and Delcroix, Marc and Nakatani, Tomohiro and Haeb-Umbach, Reinhold}},
  booktitle    = {{Proc. Interspeech 2020}},
  pages        = {{2652--2656}},
  title        = {{{Multi-Path RNN for Hierarchical Modeling of Long Sequential Data and its Application to Speaker Stream Separation}}},
  doi          = {{10.21437/Interspeech.2020-2388}},
  year         = {{2020}},
}

