@article{54548,
  author       = {Prager, Raphael Patrick and Trautmann, Heike},
  journal      = {IEEE Transactions on Evolutionary Computation},
  keywords     = {Optimization, Evolutionary computation, Benchmark testing, Hyperparameter optimization, Portfolios, Extraterrestrial measurements, Dispersion, Exploratory landscape analysis, mixed-variable problem, mixed search spaces, automated algorithm selection},
  pages        = {1--1},
  title        = {Exploratory Landscape Analysis for Mixed-Variable Problems},
  doi          = {10.1109/TEVC.2024.3399560},
  year         = {2024},
  internal-note = {NOTE(review): pages 1--1 with no volume/number looks like an IEEE early-access placeholder -- update once the final issue data is assigned},
}

@inproceedings{56277,
  abstract     = {What is learner-sensitive feedback to argumentative learner texts when it is to be issued computer- based? Learning stages are difficult to quantify. The paper provides insight into the history of research since the 1980s and a preview of what this automated feedback might look like. These questions are embedded in a research project at the Universities of Paderborn and Hannover, Germany, from which a software (project name ArgSchool) emerges that will provide such feedback.},
  author       = {Kilsbach, Sebastian and Michel, Nadine},
  booktitle    = {Proceedings of the Tenth Conference of the International Society for the Study of Argumentation},
  keywords     = {AI, argumentation mining, discourse history, (automated, learner-sensitive) feedback},
  location     = {Leiden},
  title        = {Computer-Based Generation of Learner-Sensitive Feedback to Argumentative Learner Texts},
  year         = {2024},
}

@inproceedings{52662,
  abstract     = {Static analysis tools support developers in detecting potential coding issues, such as bugs or vulnerabilities. Research emphasizes technical challenges of such tools but also mentions severe usability shortcomings. These shortcomings hinder the adoption of static analysis tools, and user dissatisfaction may even lead to tool abandonment. To comprehensively assess the state of the art, we present the first systematic usability evaluation of a wide range of static analysis tools. We derived a set of 36 relevant criteria from the literature and used them to evaluate a total of 46 static analysis tools complying with our inclusion and exclusion criteria - a representative set of mainly non-proprietary tools. The evaluation against the usability criteria in a multiple-raters approach shows that two thirds of the considered tools offer poor warning messages, while about three-quarters provide hardly any fix support. Furthermore, the integration of user knowledge is strongly neglected, which could be used for instance, to improve handling of false positives. Finally, issues regarding workflow integration and specialized user interfaces are revealed. These findings should prove useful in guiding and focusing further research and development in user experience for static code analyses.},
  author       = {Nachtigall, Marcus and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {Software Engineering 2023},
  isbn         = {978-3-88579-726-5},
  keywords     = {Automated static analysis, Software usability},
  pages        = {95--96},
  publisher    = {Gesellschaft für Informatik e.V.},
  title        = {Evaluation of Usability Criteria Addressed by Static Analysis Tools on a Large Scale},
  year         = {2023},
}

@inproceedings{52816,
  abstract     = {Manufacturing companies face the challenge of reaching required quality standards. Using
optical sensors and deep learning might help. However, training deep learning algorithms
require large amounts of visual training data. Using domain randomization to generate synthetic
image data can alleviate this bottleneck. This paper presents the application of synthetic
image training data for optical quality inspections using visual sensor technology. The results
show synthetically generated training data are appropriate for visual quality inspections.},
  author       = {Gräßler, Iris and Hieb, Michael},
  booktitle    = {Lectures},
  keywords     = {synthetic training data, machine vision quality gates, deep learning, automated inspection and quality control, production control},
  location     = {Nuremberg},
  pages        = {253--524},
  publisher    = {AMA Service GmbH},
  address      = {Wunstorf, Germany},
  title        = {Creating Synthetic Training Datasets for Inspection in Machine Vision Quality Gates in Manufacturing},
  doi          = {10.5162/smsi2023/d7.4},
  year         = {2023},
  internal-note = {NOTE(review): page range 253--524 looks implausibly long for session paper D7.4 -- possibly a typo for 253--254; also booktitle "Lectures" is likely truncated (SMSI 2023 proceedings) -- verify both against the publisher record},
}

@inproceedings{32410,
  abstract     = {Static analysis tools support developers in detecting potential coding issues, such as bugs or vulnerabilities. Research on static analysis emphasizes its technical challenges but also mentions severe usability shortcomings. These shortcomings hinder the adoption of static analysis tools, and in some cases, user dissatisfaction even leads to tool abandonment.
To comprehensively assess the current state of the art, this paper presents the first systematic usability evaluation in a wide range of static analysis tools. We derived a set of 36 relevant criteria from the scientific literature and gathered a collection of 46 static analysis tools complying with our inclusion and exclusion criteria - a representative set of mainly non-proprietary tools. Then, we evaluated how well these tools fulfill the aforementioned criteria.
The evaluation shows that more than half of the considered tools offer poor warning messages, while about three-quarters of the tools provide hardly any fix support. Furthermore, the integration of user knowledge is strongly neglected, which could be used for improved handling of false positives and tuning the results for the corresponding developer. Finally, issues regarding workflow integration and specialized user interfaces are proved further.
These findings should prove useful in guiding and focusing further research and development in the area of user experience for static code analyses.},
  author       = {Nachtigall, Marcus and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {Proceedings of the 31st ACM SIGSOFT International Symposium on Software Testing and Analysis},
  isbn         = {9781450393799},
  keywords     = {Automated static analysis, Software usability},
  pages        = {532--543},
  publisher    = {ACM},
  title        = {A Large-Scale Study of Usability Criteria Addressed by Static Analysis Tools},
  doi          = {10.1145/3533767},
  year         = {2022},
  internal-note = {NOTE(review): doi 10.1145/3533767 resolves to the whole ISSTA 2022 proceedings volume, not this paper -- replace with the paper-level DOI from the ACM DL},
}

@article{21004,
  abstract     = {Automated machine learning (AutoML) supports the algorithmic construction and data-specific customization of machine learning pipelines, including the selection, combination, and parametrization of machine learning algorithms as main constituents. Generally speaking, AutoML approaches comprise two major components: a search space model and an optimizer for traversing the space. Recent approaches have shown impressive results in the realm of supervised learning, most notably (single-label) classification (SLC). Moreover, first attempts at extending these approaches towards multi-label classification (MLC) have been made. While the space of candidate pipelines is already huge in SLC, the complexity of the search space is raised to an even higher power in MLC. One may wonder, therefore, whether and to what extent optimizers established for SLC can scale to this increased complexity, and how they compare to each other. This paper makes the following contributions: First, we survey existing approaches to AutoML for MLC. Second, we augment these approaches with optimizers not previously tried for MLC. Third, we propose a benchmarking framework that supports a fair and systematic comparison. Fourth, we conduct an extensive experimental study, evaluating the methods on a suite of MLC problems. We find a grammar-based best-first search to compare favorably to other optimizers.},
  author       = {Wever, Marcel Dominik and Tornede, Alexander and Mohr, Felix and Hüllermeier, Eyke},
  issn         = {0162-8828},
  journal      = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  keywords     = {Automated Machine Learning, Multi Label Classification, Hierarchical Planning, Bayesian Optimization},
  pages        = {1--1},
  title        = {{AutoML} for Multi-Label Classification: Overview and Empirical Evaluation},
  doi          = {10.1109/tpami.2021.3051276},
  year         = {2021},
  internal-note = {NOTE(review): pages 1--1 and missing volume/number indicate an early-access record -- update with the final volume, issue, and page range},
}

@inproceedings{48881,
  abstract     = {Classic automated algorithm selection (AS) for (combinatorial) optimization problems heavily relies on so-called instance features, i.e., numerical characteristics of the problem at hand ideally extracted with computationally low-demanding routines. For the traveling salesperson problem (TSP) a plethora of features have been suggested. Most of these features are, if at all, only normalized imprecisely raising the issue of feature values being strongly affected by the instance size. Such artifacts may have detrimental effects on algorithm selection models. We propose a normalization for two feature groups which stood out in multiple AS studies on the TSP: (a) features based on a minimum spanning tree (MST) and (b) a k-nearest neighbor graph (NNG) transformation of the input instance. To this end we theoretically derive minimum and maximum values for properties of MSTs and k-NNGs of Euclidean graphs. We analyze the differences in feature space between normalized versions of these features and their unnormalized counterparts. Our empirical investigations on various TSP benchmark sets point out that the feature scaling succeeds in eliminating the effect of the instance size. Eventually, a proof-of-concept AS-study shows promising results: models trained with normalized features tend to outperform those trained with the respective vanilla features.},
  author       = {Heins, Jonathan and Bossek, Jakob and Pohl, Janina and Seiler, Moritz and Trautmann, Heike and Kerschke, Pascal},
  booktitle    = {Proceedings of the 16th ACM/SIGEVO Conference on Foundations of Genetic Algorithms},
  isbn         = {978-1-4503-8352-3},
  keywords     = {automated algorithm selection, graph theory, instance features, normalization, traveling salesperson problem (TSP)},
  pages        = {1--15},
  publisher    = {Association for Computing Machinery},
  title        = {On the Potential of Normalized {TSP} Features for Automated Algorithm Selection},
  year         = {2021},
}

@inproceedings{48897,
  abstract     = {In this work we focus on the well-known Euclidean Traveling Salesperson Problem (TSP) and two highly competitive inexact heuristic TSP solvers, EAX and LKH, in the context of per-instance algorithm selection (AS). We evolve instances with nodes where the solvers show strongly different performance profiles. These instances serve as a basis for an exploratory study on the identification of well-discriminating problem characteristics (features). Our results in a nutshell: we show that even though (1) promising features exist, (2) these are in line with previous results from the literature, and (3) models trained with these features are more accurate than models adopting sophisticated feature selection methods, the advantage is not close to the virtual best solver in terms of penalized average runtime and so is the performance gain over the single best solver. However, we show that a feature-free deep neural network based approach solely based on visual representation of the instances already matches classical AS model results and thus shows huge potential for future studies.},
  author       = {Seiler, Moritz and Pohl, Janina and Bossek, Jakob and Kerschke, Pascal and Trautmann, Heike},
  booktitle    = {Parallel Problem Solving from {Nature} ({PPSN} XVI)},
  isbn         = {978-3-030-58111-4},
  keywords     = {Automated algorithm selection, Deep learning, Feature-based approaches, Traveling Salesperson Problem},
  pages        = {48--64},
  publisher    = {Springer-Verlag},
  title        = {Deep Learning as a Competitive Feature-Free Approach for Automated Algorithm Selection on the Traveling Salesperson Problem},
  doi          = {10.1007/978-3-030-58112-1_4},
  year         = {2020},
}

@inproceedings{3852,
  abstract     = {In automated machine learning (AutoML), the process of engineering machine learning applications with respect to a specific problem is (partially) automated.
Various AutoML tools have already been introduced to provide out-of-the-box machine learning functionality.
More specifically, by selecting machine learning algorithms and optimizing their hyperparameters, these tools produce a machine learning pipeline tailored to the problem at hand.
Except for TPOT, all of these tools restrict the maximum number of processing steps of such a pipeline.
However, as TPOT follows an evolutionary approach, it suffers from performance issues when dealing with larger datasets.
In this paper, we present an alternative approach leveraging a hierarchical planning to configure machine learning pipelines that are unlimited in length.
We evaluate our approach and find its performance to be competitive with other AutoML tools, including TPOT.},
  author       = {Wever, Marcel Dominik and Mohr, Felix and Hüllermeier, Eyke},
  booktitle    = {ICML 2018 AutoML Workshop},
  keywords     = {automated machine learning, complex pipelines, hierarchical planning},
  location     = {Stockholm, Sweden},
  title        = {{ML-Plan} for Unlimited-Length Machine Learning Pipelines},
  year         = {2018},
}

@article{48884,
  abstract     = {The Travelling Salesperson Problem (TSP) is one of the best-studied NP-hard problems. Over the years, many different solution approaches and solvers have been developed. For the first time, we directly compare five state-of-the-art inexact solvers---namely, LKH, EAX, restart variants of those, and MAOS---on a large set of well-known benchmark instances and demonstrate complementary performance, in that different instances may be solved most effectively by different algorithms. We leverage this complementarity to build an algorithm selector, which selects the best TSP solver on a per-instance basis and thus achieves significantly improved performance compared to the single best solver, representing an advance in the state of the art in solving the Euclidean TSP. Our in-depth analysis of the selectors provides insight into what drives this performance improvement.},
  author       = {Kerschke, Pascal and Kotthoff, Lars and Bossek, Jakob and Hoos, Holger H. and Trautmann, Heike},
  issn         = {1063-6560},
  journal      = {Evolutionary Computation},
  keywords     = {automated algorithm selection, machine learning, performance modeling, Travelling Salesperson Problem},
  number       = {4},
  pages        = {597--620},
  title        = {Leveraging {TSP} Solver Complementarity through Machine Learning},
  doi          = {10.1162/evco_a_00215},
  volume       = {26},
  year         = {2018},
}

