@inproceedings{61229,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Frazzetto, Paolo and Strotherm, Janine and Hermes, Luca and Sperduti, Alessandro and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {The Thirteenth International Conference on Learning Representations ({ICLR})},
  title        = {Exact Computation of Any-Order {Shapley} Interactions for Graph Neural Networks},
  year         = {2025},
}

@inproceedings{61232,
  author       = {Visser, Roel and Fumagalli, Fabian and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Proceedings of the European Symposium on Artificial Neural Networks ({ESANN})},
  keywords     = {FF},
  title        = {Explaining Outliers using {Isolation Forest} and {Shapley} Interactions},
  year         = {2025},
}

@inproceedings{61231,
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara and Herbinger, Julia},
  booktitle    = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics ({AISTATS})},
  pages        = {5140--5148},
  publisher    = {PMLR},
  title        = {Unifying Feature-Based Explanations with Functional {ANOVA} and Cooperative Game Theory},
  volume       = {258},
  year         = {2025},
}

@inproceedings{59856,
  abstract     = {Recent advances on instruction fine-tuning have led to the development of various prompting techniques for large language models, such as explicit reasoning steps. However, the success of techniques depends on various parameters, such as the task, language model, and context provided. Finding an effective prompt is, therefore, often a trial-and-error process. Most existing approaches to automatic prompting aim to optimize individual techniques instead of compositions of techniques and their dependence on the input. To fill this gap, we propose an adaptive prompting approach that predicts the optimal prompt composition ad-hoc for a given input. We apply our approach to social bias detection, a highly context-dependent task that requires semantic understanding. We evaluate it with three large language models on three datasets, comparing compositions to individual techniques and other baselines. The results underline the importance of finding an effective prompt composition. Our approach robustly ensures high detection performance, and is best in several settings. Moreover, first experiments on other tasks support its generalizability.},
  author       = {Spliethöver, Maximilian and Knebler, Tim and Fumagalli, Fabian and Muschalik, Maximilian and Hammer, Barbara and Hüllermeier, Eyke and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  editor       = {Chiruzzo, Luis and Ritter, Alan and Wang, Lu},
  isbn         = {979-8-89176-189-6},
  pages        = {2421--2449},
  publisher    = {Association for Computational Linguistics},
  title        = {Adaptive Prompting: Ad-hoc Prompt Composition for Social Bias Detection},
  year         = {2025},
}

@inproceedings{58224,
  author       = {Kenneweg, Philip and Kenneweg, Tristan and Fumagalli, Fabian and Hammer, Barbara},
  booktitle    = {2024 International Joint Conference on Neural Networks ({IJCNN})},
  keywords     = {Training, Schedules, Codes, Search methods, Source coding, Computer architecture, Transformers},
  pages        = {1--8},
  title        = {No learning rates needed: Introducing {SALSA} - Stable {Armijo} Line Search Adaptation},
  doi          = {10.1109/IJCNN60899.2024.10650124},
  year         = {2024},
}

@inproceedings{53073,
  abstract     = {While shallow decision trees may be interpretable, larger ensemble models like gradient-boosted trees, which often set the state of the art in machine learning problems involving tabular data, still remain black box models. As a remedy, the Shapley value (SV) is a well-known concept in explainable artificial intelligence (XAI) research for quantifying additive feature attributions of predictions. The model-specific TreeSHAP methodology solves the exponential complexity for retrieving exact SVs from tree-based models. Expanding beyond individual feature attribution, Shapley interactions reveal the impact of intricate feature interactions of any order. In this work, we present TreeSHAP-IQ, an efficient method to compute any-order additive Shapley interactions for predictions of tree-based models. TreeSHAP-IQ is supported by a mathematical framework that exploits polynomial arithmetic to compute the interaction scores in a single recursive traversal of the tree, akin to Linear TreeSHAP. We apply TreeSHAP-IQ on state-of-the-art tree ensembles and explore interactions on well-established benchmark datasets.},
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence ({AAAI})},
  issn         = {2374-3468},
  keywords     = {Explainable Artificial Intelligence},
  number       = {13},
  pages        = {14388--14396},
  title        = {Beyond {TreeSHAP}: Efficient Computation of Any-Order {Shapley} Interactions for Tree Ensembles},
  doi          = {10.1609/aaai.v38i13.29352},
  volume       = {38},
  year         = {2024},
}

@inproceedings{55311,
  abstract     = {Addressing the limitations of individual attribution scores via the Shapley value (SV), the field of explainable AI (XAI) has recently explored intricate interactions of features or data points. In particular, extensions of the SV, such as the Shapley Interaction Index (SII), have been proposed as a measure to still benefit from the axiomatic basis of the SV. However, similar to the SV, their exact computation remains computationally prohibitive. Hence, we propose with SVARM-IQ a sampling-based approach to efficiently approximate Shapley-based interaction indices of any order. SVARM-IQ can be applied to a broad class of interaction indices, including the SII, by leveraging a novel stratified representation. We provide non-asymptotic theoretical guarantees on its approximation quality and empirically demonstrate that SVARM-IQ achieves state-of-the-art estimation results in practical XAI scenarios on different model classes and application domains.},
  author       = {Kolpaczki, Patrick and Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Proceedings of The 27th International Conference on Artificial Intelligence and Statistics ({AISTATS})},
  pages        = {3520--3528},
  publisher    = {PMLR},
  title        = {{SVARM-IQ}: Efficient Approximation of Any-order {Shapley} Interactions through Stratification},
  volume       = {238},
  year         = {2024},
}

@inproceedings{58223,
  abstract     = {The Shapley value (SV) is a prevalent approach of allocating credit to machine learning (ML) entities to understand black box ML models. Enriching such interpretations with higher-order interactions is inevitable for complex systems, where the Shapley Interaction Index (SII) is a direct axiomatic extension of the SV. While it is well-known that the SV yields an optimal approximation of any game via a weighted least square (WLS) objective, an extension of this result to SII has been a long-standing open problem, which even led to the proposal of an alternative index. In this work, we characterize higher-order SII as a solution to a WLS problem, which constructs an optimal approximation via SII and k-Shapley values (k-SII). We prove this representation for the SV and pairwise SII and give empirically validated conjectures for higher orders. As a result, we propose KernelSHAP-IQ, a direct extension of KernelSHAP for SII, and demonstrate state-of-the-art performance for feature interactions.},
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Kolpaczki, Patrick and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Proceedings of the 41st International Conference on Machine Learning ({ICML})},
  pages        = {14308--14342},
  publisher    = {PMLR},
  title        = {{KernelSHAP-IQ}: Weighted Least Square Optimization for {Shapley} Interactions},
  volume       = {235},
  year         = {2024},
}

@inproceedings{61228,
  author       = {Muschalik, Maximilian and Baniecki, Hubert and Fumagalli, Fabian and Kolpaczki, Patrick and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Advances in Neural Information Processing Systems ({NeurIPS})},
  pages        = {130324--130357},
  title        = {{shapiq}: {Shapley} interactions for machine learning},
  volume       = {37},
  year         = {2024},
}

@inproceedings{61230,
  author       = {Kolpaczki, Patrick and Bengs, Viktor and Muschalik, Maximilian and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence ({AAAI})},
  number       = {12},
  pages        = {13246--13255},
  title        = {Approximating the {Shapley} value without marginal contributions},
  volume       = {38},
  year         = {2024},
}

@article{50262,
  abstract     = {Explainable artificial intelligence has mainly focused on static learning scenarios so far. We are interested in dynamic scenarios where data is sampled progressively, and learning is done in an incremental rather than a batch mode. We seek efficient incremental algorithms for computing feature importance (FI). Permutation feature importance (PFI) is a well-established model-agnostic measure to obtain global FI based on feature marginalization of absent features. We propose an efficient, model-agnostic algorithm called iPFI to estimate this measure incrementally and under dynamic modeling conditions including concept drift. We prove theoretical guarantees on the approximation quality in terms of expectation and variance. To validate our theoretical findings and the efficacy of our approaches in incremental scenarios dealing with streaming data rather than traditional batch settings, we conduct multiple experimental studies on benchmark data with and without concept drift.},
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara},
  issn         = {0885-6125},
  journal      = {Machine Learning},
  keywords     = {Artificial Intelligence, Software},
  number       = {12},
  pages        = {4863--4903},
  publisher    = {Springer Science and Business Media LLC},
  title        = {Incremental permutation feature importance ({iPFI}): towards online explanations on data streams},
  doi          = {10.1007/s10994-023-06385-y},
  volume       = {112},
  year         = {2023},
}

@inproceedings{48778,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Jagtani, Rohit and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Proceedings of the World Conference on Explainable Artificial Intelligence ({xAI})},
  isbn         = {9783031440632},
  title        = {{iPDP}: On Partial Dependence Plots in Dynamic Modeling Scenarios},
  doi          = {10.1007/978-3-031-44064-9_11},
  internal-note = {original issn field held 9783031440649, which is a 13-digit eBook ISBN, not an ISSN; removed -- verify the correct series ISSN before restoring},
  year         = {2023},
}

@inproceedings{48776,
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke},
  booktitle    = {Machine Learning and Knowledge Discovery in Databases: Research Track - European Conference ({ECML} {PKDD})},
  isbn         = {9783031434174},
  issn         = {1611-3349},
  publisher    = {Springer Nature Switzerland},
  title        = {{iSAGE}: An Incremental Version of {SAGE} for Online Explanation on Data Streams},
  doi          = {10.1007/978-3-031-43418-1_26},
  internal-note = {entry type changed from inbook to inproceedings: this is a paper in conference proceedings, not a book chapter},
  year         = {2023},
}

@inproceedings{48775,
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Proceedings of the European Symposium on Artificial Neural Networks ({ESANN})},
  location     = {Bruges (Belgium) and online},
  title        = {On Feature Removal for Explainability in Dynamic Environments},
  doi          = {10.14428/ESANN/2023.ES2023-148},
  year         = {2023},
}

@inproceedings{52230,
  author       = {Fumagalli, Fabian and Muschalik, Maximilian and Kolpaczki, Patrick and Hüllermeier, Eyke and Hammer, Barbara},
  booktitle    = {Advances in Neural Information Processing Systems ({NeurIPS})},
  pages        = {11515--11551},
  title        = {{SHAP-IQ}: Unified Approximation of any-order {Shapley} Interactions},
  volume       = {36},
  year         = {2023},
}

@article{48780,
  abstract     = {Explainable Artificial Intelligence (XAI) has mainly focused on static learning tasks so far. In this paper, we consider XAI in the context of online learning in dynamic environments, such as learning from real-time data streams, where models are learned incrementally and continuously adapted over the course of time. More specifically, we motivate the problem of explaining model change, i.e. explaining the difference between models before and after adaptation, instead of the models themselves. In this regard, we provide the first efficient model-agnostic approach to dynamically detecting, quantifying, and explaining significant model changes. Our approach is based on an adaptation of the well-known Permutation Feature Importance (PFI) measure. It includes two hyperparameters that control the sensitivity and directly influence explanation frequency, so that a human user can adjust the method to individual requirements and application needs. We assess and validate our method’s efficacy on illustrative synthetic data streams with three popular model classes.},
  author       = {Muschalik, Maximilian and Fumagalli, Fabian and Hammer, Barbara and Hüllermeier, Eyke},
  issn         = {0933-1875},
  journal      = {KI - Künstliche Intelligenz},
  keywords     = {Artificial Intelligence},
  number       = {3-4},
  pages        = {211--224},
  publisher    = {Springer Science and Business Media LLC},
  title        = {Agnostic Explanation of Model Change based on Feature Importance},
  doi          = {10.1007/s13218-022-00766-6},
  volume       = {36},
  year         = {2022},
}

