@inproceedings{3287, abstract = {{For optimal placement and orchestration of network services, it is crucial that their structure and semantics are specified clearly and comprehensively and are available to an orchestrator. Existing specification approaches are either ambiguous or miss important aspects regarding the behavior of virtual network functions (VNFs) forming a service. We propose to formally and unambiguously specify the behavior of these functions and services using Queuing Petri Nets (QPNs). QPNs are an established method that allows one to express queuing, synchronization, stochastically distributed processing delays, and changing traffic volume and characteristics at each VNF. With QPNs, multiple VNFs can be connected to complete network services in any structure, even specifying bidirectional network services containing loops. We discuss how management and orchestration systems can benefit from our clear and comprehensive specification approach, leading to better placement of VNFs and improved Quality of Service. Another benefit of formally specifying network services with QPNs is the diverse range of analysis options, which allow valuable insights such as the distribution of end-to-end delay. We propose a tool-based workflow that supports the specification of network services and the automatic generation of corresponding simulation code to enable an in-depth analysis of their behavior and performance.}}, author = {{Schneider, Stefan Balthasar and Sharma, Arnab and Karl, Holger and Wehrheim, Heike}}, booktitle = {{2019 IFIP/IEEE International Symposium on Integrated Network Management (IM)}}, location = {{Washington, DC, USA}}, pages = {{116--124}}, publisher = {{IFIP}}, title = {{{Specifying and Analyzing Virtual Network Services Using Queuing Petri Nets}}}, year = {{2019}}, }
@inproceedings{7635, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{IEEE International Conference on Software Testing, Verification and Validation (ICST)}}, location = {{Xi'an, China}}, pages = {{125--135}}, publisher = {{IEEE}}, title = {{{Testing Machine Learning Algorithms for Balanced Data Usage}}}, year = {{2019}}, }

@inproceedings{10094, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Software Engineering and Software Management, {SE/SWM} 2019, Stuttgart, Germany, February 18-22, 2019}}, editor = {{Becker, Steffen and Bogicevic, Ivan and Herzwurm, Georg and Wagner, Stefan}}, isbn = {{978-3-88579-686-2}}, location = {{Stuttgart, Germany}}, pages = {{157--158}}, publisher = {{Gesellschaft für Informatik e.V. (GI)}}, title = {{{Testing Balancedness of ML Algorithms}}}, doi = {{10.18420/se2019-48}}, volume = {{P-292}}, year = {{2019}}, }

@inproceedings{19656, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Proceedings of the 32nd IFIP International Conference on Testing Software and Systems (ICTSS)}}, publisher = {{Springer}}, title = {{{Automatic Fairness Testing of Machine Learning Models}}}, year = {{2020}}, }

@article{20279, author = {{Sharma, Arnab and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{Testing Monotonicity of Machine Learning Models}}}, volume = {{abs/2002.12278}}, year = {{2020}}, }

@inproceedings{16724, author = {{Sharma, Arnab and Wehrheim, Heike}}, booktitle = {{Proceedings of the ACM SIGSOFT International Symposium on Software Testing and Analysis (ISSTA)}}, publisher = {{ACM}}, title = {{{Higher Income, Larger Loan? Monotonicity Testing of Machine Learning Models}}}, year = {{2020}}, }

@article{25213, author = {{Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille and Wehrheim, Heike}}, journal = {{CoRR}}, title = {{{MLCheck -- Property-Driven Testing of Machine Learning Models}}}, volume = {{abs/2105.00741}}, year = {{2021}}, }

@inproceedings{28350, abstract = {{In recent years, we observe an increasing amount of software with machine learning components being deployed. This poses the question of quality assurance for such components: how can we validate whether specified requirements are fulfilled by machine-learned software? Current testing and verification approaches either focus on a single requirement (e.g., fairness) or specialize in a single type of machine learning model (e.g., neural networks). In this paper, we propose property-driven testing of machine learning models. Our approach MLCheck encompasses (1) a language for property specification, and (2) a technique for systematic test case generation. The specification language is comparable to property-based testing languages. Test case generation employs advanced verification technology for a systematic, property-dependent construction of test suites, without additional user-supplied generator functions. We evaluate MLCheck using requirements and data sets from three different application areas (software discrimination, learning on knowledge graphs and security).
Our evaluation shows that, despite its generality, MLCheck can even outperform specialised testing approaches while having a comparable runtime.}}, author = {{Sharma, Arnab and Demir, Caglar and Ngonga Ngomo, Axel-Cyrille and Wehrheim, Heike}}, booktitle = {{Proceedings of the 20th IEEE International Conference on Machine Learning and Applications (ICMLA)}}, publisher = {{IEEE}}, title = {{{MLCheck -- Property-Driven Testing of Machine Learning Classifiers}}}, year = {{2021}}, }

@inproceedings{32311, abstract = {{Testing is one of the most frequent means of quality assurance for software. Property-based testing aims at generating test suites for checking code against user-defined properties. Test input generation is, however, most often independent of the property to be checked, and is instead based on random or user-defined data generation. In this paper, we present property-driven unit testing of functions with numerical inputs and outputs. Like property-based testing, it allows users to define the properties to be tested for. Contrary to property-based testing, it also uses the property for a targeted generation of test inputs. Our approach is a form of learning-based testing where we first learn a model of a given black-box function using standard machine learning algorithms and, in a second step, use the model and the property for test input generation. This allows us to test both predefined functions and machine-learned regression models. Our experimental evaluation shows that our property-driven approach is more effective than standard property-based testing techniques.}}, author = {{Sharma, Arnab and Melnikov, Vitaly and Hüllermeier, Eyke and Wehrheim, Heike}}, booktitle = {{Proceedings of the 10th IEEE/ACM International Conference on Formal Methods in Software Engineering (FormaliSE)}}, pages = {{113--123}}, publisher = {{IEEE}}, title = {{{Property-Driven Testing of Black-Box Functions}}}, year = {{2022}}, }

@inbook{45886, author = {{Wehrheim, Heike and Hüllermeier, Eyke and Becker, Steffen and Becker, Matthias and Richter, Cedric and Sharma, Arnab}}, booktitle = {{On-The-Fly Computing -- Individualized IT-services in dynamic markets}}, editor = {{Haake, Claus-Jochen and Meyer auf der Heide, Friedhelm and Platzner, Marco and Wachsmuth, Henning and Wehrheim, Heike}}, pages = {{105--123}}, publisher = {{Heinz Nixdorf Institut, Universität Paderborn}}, title = {{{Composition Analysis in Unknown Contexts}}}, doi = {{10.5281/zenodo.8068510}}, volume = {{412}}, year = {{2023}}, }