@inproceedings{64823,
  abstract     = {Current legal frameworks enforce that Android developers accurately report the data their apps collect. However, large codebases can make this reporting challenging. This paper employs an empirical approach to understand developers' experience with Google Play Store's Data Safety Section (DSS) form.

We first survey 41 Android developers to understand how they categorize privacy-related data into DSS categories and how confident they feel when completing the DSS form. To gain a broader and more detailed view of the challenges developers encounter during the process, we complement the survey with an analysis of 172 online developer discussions, capturing the perspectives of 642 additional developers. Together, these two data sources represent insights from 683 developers.

Our findings reveal that developers often manually classify the privacy-related data their apps collect into the data categories defined by Google---or, in some cases, omit classification entirely---and rely heavily on existing online resources when completing the form. Moreover, developers are generally confident in recognizing the data their apps collect, yet they lack confidence in translating this knowledge into DSS-compliant disclosures. Key challenges include issues in identifying privacy-relevant data to complete the form, limited understanding of the form, and concerns about app rejection due to discrepancies with Google's privacy requirements.
These results underscore the need for clearer guidance and more accessible tooling to support developers in meeting privacy-aware reporting obligations.},
  address      = {New York, NY, USA},
  author       = {Khedkar, Mugdha and Schlichtig, Michael and Soliman, Mohamed Aboubakr Mohamed and Bodden, Eric},
  booktitle    = {Proceedings of the IEEE/ACM 13th International Conference on Mobile Software Engineering and Systems ({MOBILESoft} '26)},
  keywords     = {static analysis, data collection, data protection, privacy-aware reporting},
  location     = {Rio de Janeiro, Brazil},
  pages        = {65--68},
  publisher    = {Association for Computing Machinery},
  title        = {Challenges in {Android} Data Disclosure: An Empirical Study},
  year         = {2026},
}

@article{64821,
  author       = {Khedkar, Mugdha and Schlichtig, Michael and Atakishiyev, Nihad and Bodden, Eric},
  doi          = {10.1007/s10515-026-00601-4},
  journal      = {Automated Software Engineering},
  number       = {2},
  publisher    = {Springer US},
  title        = {Between Law and Code: Challenges and Opportunities for Automating Privacy Assessments},
  volume       = {33},
  year         = {2026},
}

@inproceedings{64909,
  author       = {Khedkar, Mugdha and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {IEEE International Conference on Software Analysis, Evolution and Reengineering ({SANER} 2026)},
  title        = {Source Code-Driven {GDPR} Documentation: Supporting {RoPA} with {Assessor View}},
  year         = {2026},
}

@misc{65017,
  abstract     = {Static Application Security Testing (SAST) tools play a vital role in modern software development by automatically detecting potential vulnerabilities in source code. However, their effectiveness is often limited by a high rate of false positives, which wastes developer's effort and undermines trust in automated analysis. This work presents a Graph Convolutional Network (GCN) model designed to predict SAST reports as true and false positive. The model leverages Code Property Graphs (CPGs) constructed from static analysis results to capture both, structural and semantic relationships within code. Trained on the CamBenchCAP dataset, the model achieved an accuracy of 100% on the test set using an 80/20 train-test split. Evaluation on the CryptoAPI-Bench benchmark further demonstrated the model's practical applicability, reaching an overall accuracy of up to 96.6%. A detailed qualitative inspection revealed that many cases marked as misclassifications corresponded to genuine security weaknesses, indicating that the model effectively reflects conservative, security-aware reasoning. Identified limitations include incomplete control-flow representation due to missing interprocedural connections. Future work will focus on integrating call graphs, applying graph explainability techniques, and extending training data across multiple SAST tools to improve generalization and interpretability.},
  author       = {Ohlmer, Tom and Schlichtig, Michael and Bodden, Eric},
  eprint       = {2603.10558},
  eprinttype   = {arXiv},
  title        = {{FP-Predictor} -- False Positive Prediction for Static Analysis Reports},
  year         = {2026},
}

@inproceedings{65030,
  author       = {Amaral, Luis and Schlichtig, Michael and Emanuel, Wagner and Almeida, Joilton and Ferreira, Carine and Kempf, Jérôme and Bonifácio, Rodrigo and Bodden, Eric and Peotta, Laerte and Pinto, Gustavo and Ribeiro, Márcio},
  booktitle    = {2026 IEEE International Conference on Software Analysis, Evolution and Reengineering ({SANER})},
  title        = {From Legacy Designs to Vulnerability Fixes: Understanding {SAST} Adoption in Non-Technological Companies},
  year         = {2026},
}

@misc{65018,
  abstract     = {Android applications collecting data from users must protect it according to the current legal frameworks. Such data protection has become even more important since in 2018 the European Union rolled out the General Data Protection Regulation (GDPR). Since app developers are not legal experts, they find it difficult to integrate privacy-aware practices into source code development. Despite these legal obligations, developers have limited tool support to reason about data protection throughout their app development process.
  This paper explores the use of static program slicing and software visualization to analyze privacy-relevant data flows in Android apps. We introduce SliceViz, a web tool that analyzes an Android app by slicing all privacy-relevant data sources detected in the source code on the back-end. It then helps developers by visualizing these privacy-relevant program slices.
  We conducted a user study with 12 participants demonstrating that SliceViz effectively aids developers in identifying privacy-relevant properties in Android apps.
  Our findings indicate that program slicing can be employed to identify and reason about privacy-relevant data flows in Android applications. With further usability improvements, developers can be better equipped to handle privacy-sensitive information.},
  author       = {Khedkar, Mugdha and Schlichtig, Michael and Mohan, Santhosh and Bodden, Eric},
  eprint       = {2503.16640},
  eprinttype   = {arXiv},
  title        = {Visualizing Privacy-Relevant Data Flows in {Android} Applications},
  year         = {2025},
}

@misc{52663,
  abstract     = {Context
Static analyses are well-established to aid in understanding bugs or vulnerabilities during the development process or in large-scale studies. A low false-positive rate is essential for the adaption in practice and for precise results of empirical studies. Unfortunately, static analyses tend to report where a vulnerability manifests rather than the fix location. This can cause presumed false positives or imprecise results.
Method
To address this problem, we designed an adaption of an existing static analysis algorithm that can distinguish between a manifestation and fix location, and reports error chains. An error chain represents at least two interconnected errors that occur successively, thus building the connection between the fix and manifestation location. We used our tool CogniCryptSUBS for a case study on 471 GitHub repositories, a performance benchmark to compare different analysis configurations, and conducted an expert interview.
Result
We found that 50 % of the projects with a report had at least one error chain. Our runtime benchmark demonstrated that our improvement caused only a minimal runtime overhead of less than 4 %. The results of our expert interview indicate that with our adapted version participants require fewer executions of the analysis.
Conclusion
Our results indicate that error chains occur frequently in real-world projects, and ignoring them can lead to imprecise evaluation results. The runtime benchmark indicates that our tool is a feasible and efficient solution for detecting error chains in real-world projects. Further, our results gave a hint that the usability of static analyses may benefit from supporting error chains.},
  author       = {Wickert, Anna-Katharina and Schlichtig, Michael and Vogel, Marvin and Winter, Lukas and Mezini, Mira and Bodden, Eric},
  keywords     = {Static analysis, error chains, false positive reduction, empirical studies},
  title        = {Supporting Error Chains in Static Analysis for Precise Evaluation Results and Enhanced Usability},
  year         = {2024},
}

@inproceedings{56140,
  abstract     = {Android apps collecting data from users must comply with legal frameworks to ensure data protection. This requirement has become even more important since the implementation of the General Data Protection Regulation (GDPR) by the European Union in 2018. Moreover, with the proposed Cyber Resilience Act on the horizon, stakeholders will soon need to assess software against even more stringent security and privacy standards. Effective privacy assessments require collaboration among groups with diverse expertise to function effectively as a cohesive unit.
This paper motivates the need for an automated approach that enhances understanding of data protection in Android apps and improves communication between the various parties involved in privacy assessments. We propose the Assessor View, a tool designed to bridge the knowledge gap between these parties, facilitating more effective privacy assessments of Android applications.},
  author       = {Khedkar, Mugdha and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {Proceedings of the 39th IEEE/ACM International Conference on Automated Software Engineering Workshops ({ASEW} '24)},
  doi          = {10.1145/3691621.3694953},
  location     = {Sacramento, California},
  title        = {Advancing {Android} Privacy Assessments with Automation},
  year         = {2024},
}

@inbook{52662,
  abstract     = {Static analysis tools support developers in detecting potential coding issues, such as bugs or vulnerabilities. Research emphasizes technical challenges of such tools but also mentions severe usability shortcomings. These shortcomings hinder the adoption of static analysis tools, and user dissatisfaction may even lead to tool abandonment. To comprehensively assess the state of the art, we present the first systematic usability evaluation of a wide range of static analysis tools. We derived a set of 36 relevant criteria from the literature and used them to evaluate a total of 46 static analysis tools complying with our inclusion and exclusion criteria - a representative set of mainly non-proprietary tools. The evaluation against the usability criteria in a multiple-raters approach shows that two thirds of the considered tools offer poor warning messages, while about three-quarters provide hardly any fix support. Furthermore, the integration of user knowledge is strongly neglected, which could be used for instance, to improve handling of false positives. Finally, issues regarding workflow integration and specialized user interfaces are revealed. These findings should prove useful in guiding and focusing further research and development in user experience for static code analyses.},
  author       = {Nachtigall, Marcus and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {Software Engineering 2023},
  isbn         = {978-3-88579-726-5},
  keywords     = {Automated static analysis, Software usability},
  pages        = {95--96},
  publisher    = {Gesellschaft für Informatik e.V.},
  title        = {Evaluation of Usability Criteria Addressed by Static Analysis Tools on a Large Scale},
  year         = {2023},
}

@inbook{52660,
  abstract     = {Application Programming Interfaces (APIs) are the primary mechanism developers use to obtain access to third-party algorithms and services. Unfortunately, APIs can be misused, which can have catastrophic consequences, especially if the APIs provide security-critical functionalities like cryptography. Understanding what API misuses are, and how they are caused, is important to prevent them, e.g., with API misuse detectors. However, definitions for API misuses and related terms in literature vary. This paper presents a systematic literature review to clarify these terms and introduces FUM, a novel Framework for API Usage constraint and Misuse classification. The literature review revealed that API misuses are violations of API usage constraints. To address this, we provide unified definitions and use them to derive FUM. To assess the extent to which FUM aids in determining and guiding the improvement of an API misuses detector’s capabilities, we performed a case study on the state-of-the-art misuse detection tool CogniCrypt. The study showed that FUM can be used to properly assess CogniCrypt’s capabilities, identify weaknesses and assist in deriving mitigations and improvements.},
  author       = {Schlichtig, Michael and Sassalla, Steffen and Narasimhan, Krishna and Bodden, Eric},
  booktitle    = {Software Engineering 2023},
  isbn         = {978-3-88579-726-5},
  keywords     = {API misuses, API usage constraints, classification framework, API misuse detection, static analysis},
  pages        = {105--106},
  publisher    = {Gesellschaft für Informatik e.V.},
  title        = {Introducing {FUM}: A Framework for {API} Usage Constraint and Misuse Classification},
  year         = {2023},
}

@misc{32409,
  abstract     = {Context: Cryptographic APIs are often misused in real-world applications. Therefore, many cryptographic API misuse detection tools have been introduced. However, there exists no established reference benchmark for a fair and comprehensive comparison and evaluation of these tools. While there are benchmarks, they often only address a subset of the domain or were only used to evaluate a subset of existing misuse detection tools. Objective: To fairly compare cryptographic API misuse detection tools and to drive future development in this domain, we will devise such a benchmark. Openness and transparency in the generation process are key factors to fairly generate and establish the needed benchmark. Method: We propose an approach where we derive the benchmark generation methodology from the literature which consists of general best practices in benchmarking and domain-specific benchmark generation. A part of this methodology is transparency and openness of the generation process, which is achieved by pre-registering this work. Based on our methodology we design CamBench, a fair "Cryptographic API Misuse Detection Tool Benchmark Suite". We will implement the first version of CamBench limiting the domain to Java, the JCA, and static analyses. Finally, we will use CamBench to compare current misuse detection tools and compare CamBench to related benchmarks of its domain.},
  author       = {Schlichtig, Michael and Wickert, Anna-Katharina and Krüger, Stefan and Bodden, Eric and Mezini, Mira},
  doi          = {10.48550/ARXIV.2204.06447},
  eprint       = {2204.06447},
  eprinttype   = {arXiv},
  keywords     = {cryptography, benchmark, API misuse, static analysis},
  title        = {{CamBench} -- Cryptographic {API} Misuse Detection Tool Benchmark Suite},
  year         = {2022},
}

@inproceedings{32410,
  abstract     = {Static analysis tools support developers in detecting potential coding issues, such as bugs or vulnerabilities. Research on static analysis emphasizes its technical challenges but also mentions severe usability shortcomings. These shortcomings hinder the adoption of static analysis tools, and in some cases, user dissatisfaction even leads to tool abandonment.
To comprehensively assess the current state of the art, this paper presents the first systematic usability evaluation in a wide range of static analysis tools. We derived a set of 36 relevant criteria from the scientific literature and gathered a collection of 46 static analysis tools complying with our inclusion and exclusion criteria - a representative set of mainly non-proprietary tools. Then, we evaluated how well these tools fulfill the aforementioned criteria.
The evaluation shows that more than half of the considered tools offer poor warning messages, while about three-quarters of the tools provide hardly any fix support. Furthermore, the integration of user knowledge is strongly neglected, which could be used for improved handling of false positives and tuning the results for the corresponding developer. Finally, issues regarding workflow integration and specialized user interfaces are proved further.
These findings should prove useful in guiding and focusing further research and development in the area of user experience for static code analyses.},
  author       = {Nachtigall, Marcus and Schlichtig, Michael and Bodden, Eric},
  booktitle    = {Proceedings of the 31st ACM SIGSOFT International Symposium on Software Testing and Analysis},
  isbn         = {9781450393799},
  keywords     = {Automated static analysis, Software usability},
  pages        = {532--543},
  publisher    = {ACM},
  title        = {A Large-Scale Study of Usability Criteria Addressed by Static Analysis Tools},
  doi          = {10.1145/3533767},
  year         = {2022},
}

@inproceedings{31133,
  abstract     = {Application Programming Interfaces (APIs) are the primary mechanism that developers use to obtain access to third-party algorithms and services. Unfortunately, APIs can be misused, which can have catastrophic consequences, especially if the APIs provide security-critical functionalities like cryptography. Understanding what API misuses are, and for what reasons they are caused, is important to prevent them, e.g., with API misuse detectors. However, definitions and nominations for API misuses and related terms in literature vary and are diverse. This paper addresses the problem of scattered knowledge and definitions of API misuses by presenting a systematic literature review on the subject and introducing FUM, a novel Framework for API Usage constraint and Misuse classification. The literature review revealed that API misuses are violations of API usage constraints. To capture this, we provide unified definitions and use them to derive FUM. To assess the extent to which FUM aids in determining and guiding the improvement of an API misuses detectors' capabilities, we performed a case study on CogniCrypt, a state-of-the-art misuse detector for cryptographic APIs. The study showed that FUM can be used to properly assess CogniCrypt's capabilities, identify weaknesses and assist in deriving mitigations and improvements. And it appears that also more generally FUM can aid the development and improvement of misuse detection tools.},
  author       = {Schlichtig, Michael and Sassalla, Steffen and Narasimhan, Krishna and Bodden, Eric},
  booktitle    = {2022 IEEE International Conference on Software Analysis, Evolution and Reengineering ({SANER})},
  keywords     = {API misuses, API usage constraints, classification framework, API misuse detection, static analysis},
  pages        = {673--684},
  title        = {{FUM} - A Framework for {API} Usage Constraint and Misuse Classification},
  doi          = {10.1109/SANER53432.2022.00085},
  year         = {2022},
}

@misc{33959,
  abstract     = {Recent studies have revealed that 87 % to 96 % of the Android apps using cryptographic APIs have a misuse which may cause security vulnerabilities. As previous studies did not conduct a qualitative examination of the validity and severity of the findings, our objective was to understand the findings in more depth. We analyzed a set of 936 open-source Java applications for cryptographic misuses. Our study reveals that 88.10 % of the analyzed applications fail to use cryptographic APIs securely. Through our manual analysis of a random sample, we gained new insights into effective false positives. For example, every fourth misuse of the frequently misused JCA class MessageDigest is an effective false positive due to its occurrence in a non-security context. As we wanted to gain deeper insights into the security implications of these misuses, we created an extensive vulnerability model for cryptographic API misuses. Our model includes previously undiscussed attacks in the context of cryptographic APIs such as DoS attacks. This model reveals that nearly half of the misuses are of high severity, e.g., hard-coded credentials and potential Man-in-the-Middle attacks.},
  author       = {Wickert, Anna-Katharina and Baumgärtner, Lars and Schlichtig, Michael and Mezini, Mira},
  doi          = {10.48550/ARXIV.2209.11103},
  eprint       = {2209.11103},
  eprinttype   = {arXiv},
  title        = {To Fix or Not to Fix: A Critical Study of Crypto-misuses in the Wild},
  year         = {2022},
}

@inproceedings{29298,
  abstract     = {Die Themen „Big Data“, „Künstliche Intelligenz und „Data Science“ werden seit einiger Zeit nicht nur in der breiten Öffentlichkeit kontrovers diskutiert, sondern stellen für die Ausbildung in den IT- und IT-nahen Berufen schon heute neue Herausforderungen dar, die in Zukunft durch die gesellschaftliche und technologische Weiterentwicklung hin zu einer Datengesellschaft noch größer werden.
An dieser Stelle stellt sich die Frage, welche Aspekte dieses großen Themenkomplexes für Schule und Ausbildung von Wichtigkeit sind und wie diese Themen sinnstiftend und gewinnbringend in die informatische Ausbildung in verschiedenen Bildungsgängen integriert werden können. Im Rahmen des von uns im Jahr 2017 organisierten Symposiums zum Thema „Data Science“ wurden für die Bildung relevante Aspekte erörtert, wodurch als Kernelemente für den Unterricht Algorithmen der Künstlichen Intelligenz und ihre Anwendung in Industrie und Gesellschaft, Explorationen von Big Data sowie der Umgang mit eigenen Daten in sozialen Netzwerken herausgearbeitet wurden. Ziel ist, aus diesen Themenbereichen sowohl ein umfassendes Curriculum als auch Module für verschiedene Unterrichtsszenarien zu entwickeln und zu erproben. Durch diese Materialien soll es Lehrkräften aus der Informatik, Mathematik oder Technik ermöglicht werden, diese Themen auf Basis des Curriculums und der erprobten Unterrichtskonzepte selbst zu unterrichten.
Hierfür wurde im Rahmen des Projekts ProDaBi (Projekt Data Science und Big Data in der Schule, https://www.prodabi.de), initiiert von der Telekom Stiftung, ein experimenteller Projektkurs entwickelt, den wir mit Schüler:innen der Sekundarstufe II an der Universität Paderborn im Schuljahr 2018/19 durchführten. Dieser Kurs enthält neben einem Modul zur Exploration von Big Data und einem weiteren Modul zum Maschinellen Lernen als Teil der Künstlichen Intelligenz auch eine Projektphase, die es in Zusammenarbeit mit lokalen Unternehmen den Schüler:innen
ermöglicht, das Erlernte in ein reales Data Science-Projekt einzubringen. Aus den Erfahrungen dieses Projektkurses sowie den parallel durchgeführten Erprobungen einzelner Bausteine auch mit beruflichen Schulen werden ab dem Schuljahr 2019/20 die hierfür verwendeten Materialien weiterentwickelt und weiteren Kooperationspartnern zur Erprobung zur Verfügung gestellt. Damit wurden zum Ende des Projekts nicht nur vollständige Unterrichtsmaterialien, sondern auch ein umfassendes Curriculum entwickelt.},
  author       = {Opel, Simone Anna and Schlichtig, Michael},
  booktitle    = {Sammelband der 27. Fachtagung der BAG Berufliche Bildung},
  editor       = {Vollmer, Thomas and Karges, Torben and Richter, Tim and Schlömer, Britta and Schütt-Sayed, Sören},
  keywords     = {Berufsbildung, vocational education, Ausbildung, training, berufliche Weiterbildung, advanced vocational education, Digitalisierung, digitalization, Unterricht, teaching, Lehrmethode, teaching method, Interdisziplinarität, interdisciplinarity, Fachdidaktik, subject didactics, Curriculum, curriculum, gewerblich-technischer Beruf, vocational/technical occupation, Fachkraft, specialist, Qualifikationsanforderungen, qualification requirements, Kompetenz, competence, Lehrerbildung, teacher training, Bundesrepublik Deutschland, Federal Republic of Germany},
  location     = {Siegen},
  pages        = {176--194},
  publisher    = {wbv Media GmbH \& Co. KG},
  title        = {{Data Science und Big Data in der beruflichen Bildung – Konzeption und Erprobung eines Projektkurses für die Sekundarstufe II}},
  doi          = {10.3278/6004722w},
  volume       = {55},
  year         = {2020},
}

@inproceedings{15332,
  abstract     = {Artificial intelligence (AI) has the potential for far-reaching – in our opinion – irreversible changes.
They range from effects on the individual and society to new societal and social issues. The question arises
as to how students can learn the basic functioning of AI systems, what areas of life and society are affected
by these and – most important – how their own lives are affected by these changes. Therefore, we are developing and evaluating school materials for the German ”Science Year AI”. It can be used for students of all
school types from the seventh grade upwards and will be distributed to about 2000 schools in autumn with
the support of the Federal Ministry of Education and Research. The material deals with the following aspects
of AI: Discussing everyday experiences with AI, how does machine learning work, historical development
of AI concepts, difference between man and machine, future distribution of roles between man and machine,
in which AI world do we want to live and how much AI would we like to have in our lives. Through an
accompanying evaluation, high quality of the technical content and didactic preparation is achieved in order
to guarantee the long-term applicability in the teaching context in the different age groups and school types.
In this paper, we describe the current state of the material development, the challenges arising, and the results
of tests with different classes to date. We also present first ideas for evaluating the results.},
  author       = {Schlichtig, Michael and Opel, Simone Anna and Budde, Lea and Schulte, Carsten},
  booktitle    = {ISSEP 2019 - 12th International conference on informatics in schools: Situation, evaluation and perspectives, Local Proceedings},
  editor       = {Jasutė, Eglė and Pozdniakov, Sergei},
  isbn         = {978-9925-553-27-3},
  keywords     = {Artificial Intelligence, Machine Learning, Teaching Material, Societal Aspects, Ethics, Social Aspects, Science Year, Simulation Game},
  location     = {Larnaca},
  pages        = {65--73},
  title        = {Understanding Artificial Intelligence – A Project for the Development of Comprehensive Teaching Material},
  volume       = {12},
  year         = {2019},
}

@inproceedings{15640,
  author       = {Opel, Simone Anna and Schlichtig, Michael and Schulte, Carsten and Biehler, Rolf and Frischemeier, Daniel and Podworny, Susanne and Wassong, Thomas},
  booktitle    = {INFOS},
  pages        = {285--294},
  publisher    = {Gesellschaft für Informatik},
  title        = {{Entwicklung und Reflexion einer Unterrichtssequenz zum Maschinellen Lernen als Aspekt von Data Science in der Sekundarstufe II}},
  volume       = {P-288},
  year         = {2019},
}

@inproceedings{15641,
  author       = {Schlichtig, Michael and Opel, Simone Anna and Schulte, Carsten and Biehler, Rolf and Frischemeier, Daniel and Podworny, Susanne and Wassong, Thomas},
  booktitle    = {INFOS},
  internal-note = {NOTE(review): same title, year, and publisher as entry 14848 -- possibly a duplicate under a different key; verify and merge if so},
  pages        = {385},
  publisher    = {Gesellschaft für Informatik},
  title        = {{Maschinelles Lernen im Unterricht mit Jupyter Notebook}},
  volume       = {P-288},
  year         = {2019},
}

@inproceedings{15643,
  author       = {Opel, Simone Anna and Schlichtig, Michael and Schulte, Carsten},
  booktitle    = {WiPSCE},
  pages        = {11:1--11:2},
  publisher    = {ACM},
  title        = {Developing Teaching Materials on Artificial Intelligence by Using a Simulation Game (Work in Progress)},
  year         = {2019},
}

@inproceedings{14848,
  abstract     = {Data Science und Big Data durchdringt in ihren diversen Facetten unser tägliches Leben– kaum ein Tag, an dem nicht verschiedene Meldungen über technische Innovationen, Einsatzmöglichkeiten von Künstlicher Intelligenz (KI) und Maschinelles Lernen (ML) und ihre ethischen sowie gesellschaftlichen Implikationen in den unterschiedlichen Medien diskutiert werden. Aus diesem Grund erscheint es uns immens wichtig, diese Fragestellungen und Technologien auch in den Unterricht der Sekundarstufe II zu integrieren. Um diesem Anspruch gerecht zu werden, entwickelten wir im Rahmen eines Forschungsprojekts ein Curriculum, welches wir als konkretes Unterrichtskonzept innerhalb eines Projektkurses erprobt, evaluiert weiterentwickelt wird. Bei der Implementierung entschieden wir uns, zur aktiven Umsetzung von Konzepten von ML als Plattform Jupyter Notebook mit Python zu verwenden, da diese Umgebung durch die Verbindung von Code und Hypertext zur Dokumentation und Erklärung Medienbrüche im Lernprozess verringern kann. Zudem ist Python zur Implementierung der Methoden von ML sehr gut geeignet. Im Themenfeld des ML als Teilgebiet der KI legen wir den Fokus auf zwei unterschiedliche Lernverfahren um verschieden Aspekte von ML, u.A. wie Nachvollziehbarkeit unter gesellschaftlichen Gesichtspunkten zu vermitteln. Diese sind Künstliche Neuronale Netze (bei denen die Berechnung und Bedeutung der Kantengewichte zwischen den Neuronen für den Menschen insbesondere bei komplexeren Netzen kaum nachvollziehbar erschienen) und Entscheidungsbäume (strukturierte und gerichtete Bäume zur Darstellung von Entscheidungsregeln, welche auch für Schülerinnen und Schüler meist gut nachvollziehbares und verständliches KI-Modell darstellen). In diesem Workshop stellen wir konkrete Umsetzungsbeispiele inklusive der Programmierung für beide Verfahren mit Jupyter Notebook und Python als Teil einer Unterrichtssequenz vor und diskutieren diese.},
  author       = {Schlichtig, Michael and Opel, Simone and Schulte, Carsten and Biehler, Rolf and Frischemeier, Daniel and Podworny, Susanne and Wassong, Thomas},
  booktitle    = {Informatik für alle},
  editor       = {Pasternak, Arno},
  isbn         = {978-3-88579-682-4},
  location     = {Dortmund, Germany},
  pages        = {385},
  publisher    = {Gesellschaft für Informatik},
  title        = {{Maschinelles Lernen im Unterricht mit Jupyter Notebook}},
  year         = {2019},
}

