@inproceedings{57160,
  abstract     = {{Large audio tagging models are usually trained or pre-trained on AudioSet, a dataset that encompasses a large number of different sound classes and acoustic environments. Knowledge distillation has emerged as a method to compress such models without compromising their effectiveness. Audio tagging has many different applications, some of which require specialization to a narrow domain of sounds to be classified. For these scenarios, it is beneficial to distill the large audio tagger with respect to a specific subset of sounds of interest. We present a method to prune a general dataset with respect to a target dataset. By distilling with such a specialized pruned dataset, we obtain a compressed model with better classification accuracy in the specific target domain than with target-agnostic distillation.}},
  author       = {{Werning, Alexander and Haeb-Umbach, Reinhold}},
  booktitle    = {{32nd European Signal Processing Conference (EUSIPCO 2024)}},
  keywords     = {{data pruning, knowledge distillation, audio tagging}},
  location     = {{Lyon, France}},
  title        = {{{Target-Specific Dataset Pruning for Compression of Audio Tagging Models}}},
  year         = {{2024}},
}

