@article{46310,
  abstract     = {{Classic automated algorithm selection (AS) for (combinatorial) optimization problems heavily relies on so-called instance features, i.e., numerical characteristics of the problem at hand, ideally extracted with computationally low-demanding routines. For the traveling salesperson problem (TSP), a plethora of features have been suggested. Most of these features are, if at all, only normalized imprecisely, raising the issue of feature values being strongly affected by the instance size. Such artifacts may have detrimental effects on algorithm selection models. We propose a normalization for two feature groups which stood out in multiple AS studies on the TSP: (a) features based on a minimum spanning tree (MST) and (b) nearest neighbor relationships of the input instance. To this end, we theoretically derive minimum and maximum values for properties of MSTs and k-nearest neighbor graphs (NNG) of Euclidean graphs. We analyze the differences in feature space between normalized versions of these features and their unnormalized counterparts. Our empirical investigations on various TSP benchmark sets show that the feature scaling succeeds in eliminating the effect of the instance size. A proof-of-concept AS study shows promising results: models trained with normalized features tend to outperform those trained with the respective vanilla features.}},
  author       = {{Heins, Jonathan and Bossek, Jakob and Pohl, Janina and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal}},
  issn         = {{0304-3975}},
  journal      = {{Theoretical Computer Science}},
  keywords     = {{Feature normalization, Algorithm selection, Traveling salesperson problem}},
  pages        = {{123--145}},
  title        = {{{A study on the effects of normalized TSP features for automated algorithm selection}}},
  doi          = {{10.1016/j.tcs.2022.10.019}},
  volume       = {{940}},
  year         = {{2023}},
}

@inproceedings{48881,
  abstract     = {{Classic automated algorithm selection (AS) for (combinatorial) optimization problems heavily relies on so-called instance features, i.e., numerical characteristics of the problem at hand, ideally extracted with computationally low-demanding routines. For the traveling salesperson problem (TSP), a plethora of features have been suggested. Most of these features are, if at all, only normalized imprecisely, raising the issue of feature values being strongly affected by the instance size. Such artifacts may have detrimental effects on algorithm selection models. We propose a normalization for two feature groups which stood out in multiple AS studies on the TSP: (a) features based on a minimum spanning tree (MST) and (b) a k-nearest neighbor graph (NNG) transformation of the input instance. To this end, we theoretically derive minimum and maximum values for properties of MSTs and k-NNGs of Euclidean graphs. We analyze the differences in feature space between normalized versions of these features and their unnormalized counterparts. Our empirical investigations on various TSP benchmark sets show that the feature scaling succeeds in eliminating the effect of the instance size. Finally, a proof-of-concept AS study shows promising results: models trained with normalized features tend to outperform those trained with the respective vanilla features.}},
  author       = {{Heins, Jonathan and Bossek, Jakob and Pohl, Janina and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal}},
  booktitle    = {{Proceedings of the 16th ACM/SIGEVO Conference on Foundations of Genetic Algorithms}},
  isbn         = {{978-1-4503-8352-3}},
  keywords     = {{automated algorithm selection, graph theory, instance features, normalization, traveling salesperson problem (TSP)}},
  pages        = {{1--15}},
  publisher    = {{Association for Computing Machinery}},
  title        = {{{On the Potential of Normalized TSP Features for Automated Algorithm Selection}}},
  year         = {{2021}},
}

@inproceedings{11869,
  abstract     = {{Amongst several data-driven approaches for designing filters for the time sequence of spectral parameters, the linear discriminant analysis (LDA) based method has been proposed for automatic speech recognition. Here we apply LDA-based filter design to cepstral features, which better match the inherent assumption of this method that feature vector components are uncorrelated. Extensive recognition experiments have been conducted both on the standard TIMIT phone recognition task and on a proprietary 130-word command word task under various adverse environmental conditions, including reverberant data with real-life room impulse responses and data processed by acoustic echo cancellation algorithms. Significant error rate reductions have been achieved when applying the novel long-range feature filters compared to standard approaches employing cepstral mean normalization and delta and delta-delta features, in particular when facing acoustic echo cancellation scenarios and room reverberation. For example, the phone accuracy on reverberated TIMIT data could be increased from 50.7\% to 56.0\%.}},
  author       = {{Lieb, M. and Haeb-Umbach, Reinhold}},
  booktitle    = {{IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2000)}},
  keywords     = {{acoustic echo cancellation algorithms, adverse environmental conditions, automatic speech recognition, cepstral analysis, cepstral features, cepstral mean normalization, command word task, delta-delta features, delta features, echo suppression, error rate reductions, feature vector components, FIR filters, LDA derived cepstral trajectory filters, linear discriminant analysis, long-range feature filters, phone accuracy, real-life room impulse responses, reverberant data, spectral parameters, speech recognition, standard TIMIT phone recognition task}},
  pages        = {{II-1105--II-1108}},
  title        = {{{LDA derived cepstral trajectory filters in adverse environmental conditions}}},
  doi          = {{10.1109/ICASSP.2000.859157}},
  volume       = {{2}},
  year         = {{2000}},
}
