@inproceedings{64265,
  author       = {Rohde, Jannik and Meyer, Olga and Duc, Quy Luu and Jürgenhake, Christoph and Sankal, Talib and Dumitrescu, Roman and Schmitt, Robert H.},
  booktitle    = {2022 Sixth IEEE International Conference on Robotic Computing (IRC)},
  publisher    = {IEEE},
  title        = {Teleoperation of an Industrial Robot using Public Networks and {5G} {SA} Campus Networks},
  doi          = {10.1109/irc55401.2022.00012},
  year         = {2023},
}

@inproceedings{64261,
  author       = {Kurpick, Christian and Dumitrescu, Roman and Falkowski, Tommy and Fechtelpeter, Christian and Kühn, Arno},
  booktitle    = {2022 IEEE 28th International Conference on Engineering, Technology and Innovation (ICE/ITMC) \& 31st International Association For Management of Technology (IAMOT) Joint Conference},
  publisher    = {IEEE},
  title        = {Digitalization and Sustainability in Strategic Management: Research Agenda toward Dual Transformation},
  doi          = {10.1109/ice/itmc-iamot55089.2022.10033146},
  year         = {2023},
}

@inproceedings{49318,
  author       = {Tissen, Denis and Koldewey, Christian and Dumitrescu, Roman},
  location     = {Ljubljana, Slovenia},
  title        = {A Process-Model for Tailoring Prototyping of Cyber-Physical Systems},
  year         = {2023},
}

@article{47151,
  abstract     = {When it comes to mastering the digital world, the education system is more and more facing the task of making students competent and self-determined agents when interacting with digital artefacts. This task often falls to computing education. In the traditional fields of computing education, a plethora of models, guidelines, and principles exist, which help scholars and teachers identify what the relevant aspects are and which of them one should cover in the classroom. When it comes to explaining the world of digital artefacts, however, there is hardly any such guiding model. The ARIadne model introduced in this paper provides a means of explanation and exploration of digital artefacts which help teachers and students to do a subject analysis of digital artefacts by scrutinizing them from several perspectives. Instead of artificially separating aspects which target the same phenomena within different areas of education (like computing, ICT or media education), the model integrates technological aspects of digital artefacts and the relevant societal discourses of their usage, their impacts and the reasons behind their development into a coherent explanation model.},
  author       = {Winkelnkemper, Felix and Höper, Lukas and Schulte, Carsten},
  issn         = {1648-5831},
  journal      = {Informatics in Education},
  keywords     = {Computer Science Applications, Communication, Education, General Engineering},
  publisher    = {Vilnius University Press},
  title        = {{ARIadne} -- An Explanation Model for Digital Artefacts},
  doi          = {10.15388/infedu.2024.09},
  year         = {2023},
}

@inbook{46572,
  abstract     = {Indonesian is classified as underrepresented in the Natural Language Processing (NLP) field, despite being the tenth most spoken language in the world with 198 million speakers. The paucity of datasets is recognized as the main reason for the slow advancements in NLP research for underrepresented languages. Significant attempts were made in 2020 to address this drawback for Indonesian. The Indonesian Natural Language Understanding (IndoNLU) benchmark was introduced alongside IndoBERT pre-trained language model. The second benchmark, Indonesian Language Evaluation Montage (IndoLEM), was presented in the same year. These benchmarks support several tasks, including Named Entity Recognition (NER). However, all NER datasets are in the public domain and do not contain domain-specific datasets. To alleviate this drawback, we introduce IndQNER, a manually annotated NER benchmark dataset in the religious domain that adheres to a meticulously designed annotation guideline. Since Indonesia has the world’s largest Muslim population, we build the dataset from the Indonesian translation of the Quran. The dataset includes 2475 named entities representing 18 different classes. To assess the annotation quality of IndQNER, we perform experiments with BiLSTM and CRF-based NER, as well as IndoBERT fine-tuning. The results reveal that the first model outperforms the second model achieving 0.98 F1 points. This outcome indicates that IndQNER may be an acceptable evaluation metric for Indonesian NER tasks in the aforementioned domain, widening the research’s domain range.},
  author       = {Gusmita, Ria Hari and Firmansyah, Asep Fajar and Moussallem, Diego and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {Natural Language Processing and Information Systems},
  isbn         = {9783031353192},
  issn         = {0302-9743},
  keywords     = {NER benchmark dataset, Indonesian, specific domain},
  location     = {Derby, UK},
  publisher    = {Springer Nature Switzerland},
  title        = {{IndQNER}: Named Entity Recognition Benchmark Dataset from the {Indonesian} Translation of the {Quran}},
  doi          = {10.1007/978-3-031-35320-8_12},
  year         = {2023},
}

@inbook{52859,
  author       = {de Camargo e Souza Câmara, Igor and Turhan, Anni-Yasmin},
  booktitle    = {Logics in Artificial Intelligence},
  isbn         = {9783031436185},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Deciding Subsumption in Defeasible {$\mathcal{ELI}_\bot$} with Typicality Models},
  doi          = {10.1007/978-3-031-43619-2_36},
  year         = {2023},
}

@inproceedings{58723,
  abstract     = {In real-world debates, the most common way to counter an argument is to reason against its main point, that is, its conclusion. Existing work on the automatic generation of natural language counter-arguments does not address the relation to the conclusion, possibly because many arguments leave their conclusion implicit. In this paper, we hypothesize that the key to effective counter-argument generation is to explicitly model the argument's conclusion and to ensure that the stance of the generated counter is opposite to that conclusion. In particular, we propose a multitask approach that jointly learns to generate both the conclusion and the counter of an input argument. The approach employs a stance-based ranking component that selects the counter from a diverse set of generated candidates whose stance best opposes the generated conclusion. In both automatic and manual evaluation, we provide evidence that our approach generates more relevant and stance-adhering counters than strong baselines.},
  author       = {Alshomary, Milad and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics},
  editor       = {Vlachos, Andreas and Augenstein, Isabelle},
  pages        = {957--967},
  publisher    = {Association for Computational Linguistics},
  title        = {Conclusion-based Counter-Argument Generation},
  doi          = {10.18653/v1/2023.eacl-main.67},
  year         = {2023},
}

@article{60355,
  abstract     = {We present a method for the generation of higher-order tetrahedral meshes. In contrast to previous methods, the curved tetrahedral elements are guaranteed to be free of degeneracies and inversions while conforming exactly to prescribed piecewise polynomial surfaces, such as domain boundaries or material interfaces. Arbitrary polynomial order is supported. Algorithmically, the polynomial input surfaces are first covered by a single layer of carefully constructed curved elements using a recursive refinement procedure that provably avoids degeneracies and inversions. These tetrahedral elements are designed such that the remaining space is bounded piecewise linearly. In this way, our method effectively reduces the curved meshing problem to the classical problem of linear mesh generation (for the remaining space).},
  author       = {Khanteimouri, Payam and Campen, Marcel},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {6},
  pages        = {1--19},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {{3D} {Bézier} Guarding: Boundary-Conforming Curved Tetrahedral Meshing},
  doi          = {10.1145/3618332},
  volume       = {42},
  year         = {2023},
}

@article{60337,
  abstract     = {Volumetric mapping is a ubiquitous and difficult problem in Geometry Processing and has been the subject of research in numerous and various directions. While several methods show encouraging results, the field still lacks a general approach with guarantees regarding map bijectivity. Through this work, we aim at opening the door to a new family of methods by providing a novel framework based on the concept of progressive expansion. Starting from an initial map of a tetrahedral mesh whose image may contain degeneracies but no inversions, we incrementally adjust vertex images to expand degenerate elements. By restricting movement to so-called expansion cones, it is done in such a way that the number of degenerate elements decreases in a strictly monotonic manner, without ever introducing any inversion. Adaptive local refinement of the mesh is performed to facilitate this process. We describe a prototype algorithm in the realm of this framework for the computation of maps from ball-topology tetrahedral meshes to convex or star-shaped domains. This algorithm is evaluated and compared to state-of-the-art methods, demonstrating its benefits in terms of bijectivity. We also discuss the associated cost in terms of sometimes significant mesh refinement to obtain the necessary degrees of freedom required for establishing a valid mapping. Our conclusions include that while this algorithm is only of limited immediate practical utility due to efficiency concerns, the general framework has the potential to inspire a range of novel methods improving on the efficiency aspect.},
  author       = {Nigolian, Valentin Zénon and Campen, Marcel and Bommes, David},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  pages        = {1--19},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Expansion Cones: A Progressive Volumetric Mapping Framework},
  doi          = {10.1145/3592421},
  volume       = {42},
  year         = {2023},
}

@article{60354,
  abstract     = {We present a set of operators to perform modifications, in particular collapses and splits, in volumetric cell complexes which are discretely embedded in a background mesh. Topological integrity and geometric embedding validity are carefully maintained. We apply these operators strategically to volumetric block decompositions, so-called T-meshes or base complexes, in the context of hexahedral mesh generation. This allows circumventing the expensive and unreliable global volumetric remapping step in the versatile meshing pipeline based on 3D integer-grid maps. In essence, we reduce this step to simpler local cube mapping problems, for which reliable solutions are available. As a consequence, the robustness of the mesh generation process is increased, especially when targeting coarse or block-structured hexahedral meshes. We furthermore extend this pipeline to support feature alignment constraints, and systematically respect these throughout, enabling the generation of meshes that align to points, curves, and surfaces of special interest, whether on the boundary or in the interior of the domain.},
  author       = {Brückler, Hendrik and Campen, Marcel},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {6},
  pages        = {1--24},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Collapsing Embedded Cell Complexes for Safer Hexahedral Meshing},
  doi          = {10.1145/3618384},
  volume       = {42},
  year         = {2023},
}

@article{60335,
  abstract     = {A method is presented to compute volumetric maps and parametrizations of objects over 3D domains. As a key feature, continuity and bijectivity are ensured by construction. Arbitrary objects of ball topology, represented as tetrahedral meshes, are supported. Arbitrary convex as well as star-shaped domains are supported. Full control over the boundary mapping is provided. The method is based on the technique of simplicial foliations, generalized to a broader class of domain shapes and applied adaptively in a novel localized manner. This increases flexibility as well as efficiency over the state of the art, while maintaining reliability in guaranteeing map bijectivity.},
  author       = {Hinderink, Steffen and Campen, Marcel},
  issn         = {0730-0301},
  journal      = {ACM Transactions on Graphics},
  number       = {4},
  pages        = {1--16},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Galaxy Maps: Localized Foliations for Bijective Volumetric Mapping},
  doi          = {10.1145/3592410},
  volume       = {42},
  year         = {2023},
}

@article{60333,
  abstract     = {We describe HalfedgeCNN, a collection of modules to build neural networks that operate on triangle meshes. Taking inspiration from the (edge-based) MeshCNN, convolution, pooling, and unpooling layers are consistently defined on the basis of halfedges of the mesh, pairs of oppositely oriented virtual instances of each edge. This provides benefits over alternative definitions on the basis of vertices, edges, or faces. Additional interface layers enable support for feature data associated with such mesh entities in input and output as well. Due to being defined natively on mesh entities and their neighborhoods, lossy resampling or interpolation techniques (to enable the application of operators adopted from image domains) do not need to be employed. The operators have various degrees of freedom that can be exploited to adapt to application-specific needs.},
  author       = {Ludwig, Ingmar and Tyson, Daniel and Campen, Marcel},
  issn         = {0167-7055},
  journal      = {Computer Graphics Forum},
  number       = {5},
  publisher    = {Wiley},
  title        = {{HalfedgeCNN} for Native and Flexible Deep Learning on Triangle Meshes},
  doi          = {10.1111/cgf.14898},
  volume       = {42},
  year         = {2023},
}

@inproceedings{46500,
  abstract     = {The security of Industrial Control Systems is relevant both for reliable production system operations and for high-quality throughput in terms of manufactured products. Security measures are designed, operated and maintained by different roles along product and production system lifecycles. Defense-in-Depth as a paradigm builds upon the assumption that breaches are unavoidable. The paper at hand provides an analysis of roles, corresponding Human Factors and their relevance for data theft and sabotage attacks. The resulting taxonomy is reflected by an example related to Additive Manufacturing. The results assist in both designing and redesigning Industrial Control System as part of an entire production system so that Defense-in-Depth with regard to Human Factors is built in by design.},
  author       = {Pottebaum, Jens and Rossel, Jost and Somorovsky, Juraj and Acar, Yasemin and Fahr, René and Arias Cabarcos, Patricia and Bodden, Eric and Gräßler, Iris},
  booktitle    = {2023 IEEE European Symposium on Security and Privacy Workshops (EuroS\&PW)},
  keywords     = {Defense-in-Depth, Human Factors, Production Engineering, Product Design, Systems Engineering},
  location     = {Delft, Netherlands},
  pages        = {379--385},
  publisher    = {IEEE},
  title        = {Re-Envisioning Industrial Control Systems Security by Considering Human Factors as a Core Element of Defense-in-Depth},
  doi          = {10.1109/eurospw59978.2023.00048},
  year         = {2023},
}

@inproceedings{48012,
  abstract     = {3D printing is a well-established technology with rapidly increasing usage scenarios both in the industry and consumer context. The growing popularity of 3D printing has also attracted security researchers, who have analyzed possibilities for weakening 3D models or stealing intellectual property from 3D models. We extend these important aspects and provide the first comprehensive security analysis of 3D printing data formats. We performed our systematic study on the example of the 3D Manufacturing Format (3MF), which offers a large variety of features that could lead to critical attacks. Based on 3MF’s features, we systematized three attack goals: Data Exfiltration (dex), Denial of Service, and UI Spoofing (uis). We achieve these goals by exploiting the complexity of 3MF, which is based on the Open Packaging Conventions (OPC) format and uses XML to define 3D models. In total, our analysis led to 352 tests. To create and run these tests automatically, we implemented an open-source tool named 3MF Analyzer (tool), which helped us evaluate 20 applications.},
  author       = {Rossel, Jost and Mladenov, Vladislav and Somorovsky, Juraj},
  booktitle    = {Proceedings of the 26th International Symposium on Research in Attacks, Intrusions and Defenses},
  keywords     = {Data Format Security, 3D Manufacturing Format, 3D Printing, Additive Manufacturing},
  location     = {Hong Kong},
  publisher    = {ACM},
  title        = {Security Analysis of the {3MF} Data Format},
  doi          = {10.1145/3607199.3607216},
  year         = {2023},
}

@inproceedings{47448,
  abstract     = {In XAI it is important to consider that, in contrast to explanations for professional audiences, one cannot assume common expertise when explaining for laypeople. But such explanations between humans vary greatly, making it difficult to research commonalities across explanations. We used the dual nature theory, a techno-philosophical approach, to cope with these challenges. According to it, one can explain, for example, an XAI's decision by addressing its dual nature: by focusing on the Architecture (e.g., the logic of its algorithms) or the Relevance (e.g., the severity of a decision, the implications of a recommendation). We investigated 20 game explanations using the theory as an analytical framework. We elaborate how we used the theory to quickly structure and compare explanations of technological artifacts. We supplemented results from analyzing the explanation contents with results from a video recall to explore how explainers justified their explanation. We found that explainers were focusing on the physical aspects of the game first (Architecture) and only later on aspects of the Relevance. Reasoning in the video recalls indicated that EX regarded the focus on the Architecture as important for structuring the explanation initially by explaining the basic components before focusing on more complex, intangible aspects. Shifting between addressing the two sides was justified by explanation goals, emerging misunderstandings, and the knowledge needs of the explainee. We discovered several commonalities that inspire future research questions which, if further generalizable, provide first ideas for the construction of synthetic explanations.},
  author       = {Terfloth, Lutz and Schaffer, Michael and Buhl, Heike M. and Schulte, Carsten},
  isbn         = {978-3-031-44069-4},
  location     = {Lisboa},
  publisher    = {Springer, Cham},
  title        = {Adding Why to What? Analyses of an Everyday Explanation},
  doi          = {10.1007/978-3-031-44070-0_13},
  year         = {2023},
}

@inbook{54588,
  author       = {Manzoor, Ali and Saleem, Muhammad and Moussallem, Diego and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille},
  booktitle    = {The Semantic Web},
  isbn         = {9783031334542},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {{RELD}: A Knowledge Graph of Relation Extraction Datasets},
  doi          = {10.1007/978-3-031-33455-9_20},
  year         = {2023},
}

@inbook{46191,
  author       = {Alt, Christoph and Kenter, Tobias and Faghih-Naini, Sara and Faj, Jennifer and Opdenhövel, Jan-Oliver and Plessl, Christian and Aizinger, Vadym and Hönig, Jan and Köstler, Harald},
  booktitle    = {Lecture Notes in Computer Science},
  isbn         = {9783031320408},
  issn         = {0302-9743},
  publisher    = {Springer Nature Switzerland},
  title        = {Shallow Water {DG} Simulations on {FPGAs}: Design and Comparison of a Novel Code Generation Pipeline},
  doi          = {10.1007/978-3-031-32041-5_5},
  year         = {2023},
}

@inproceedings{46190,
  author       = {Opdenhövel, Jan-Oliver and Plessl, Christian and Kenter, Tobias},
  booktitle    = {Proceedings of the 13th International Symposium on Highly Efficient Accelerators and Reconfigurable Technologies (HEART)},
  publisher    = {ACM},
  title        = {Mutation Tree Reconstruction of Tumor Cells on {FPGAs} Using a Bit-Level Matrix Representation},
  doi          = {10.1145/3597031.3597050},
  year         = {2023},
}

@inproceedings{35083,
  author       = {Dann, Andreas Peter and Hermann, Ben and Bodden, Eric},
  title        = {{UpCy}: Safely Updating Outdated Dependencies},
  year         = {2023},
}

@article{63060,
  author       = {Wu, Jing and Wang, Lin and Jin, Qirui and Liu, Fangming},
  issn         = {1045-9219},
  journal      = {IEEE Transactions on Parallel and Distributed Systems},
  number       = {2},
  pages        = {280--296},
  publisher    = {Institute of Electrical and Electronics Engineers (IEEE)},
  title        = {{Graft}: Efficient Inference Serving for Hybrid Deep Learning With {SLO} Guarantees via {DNN} Re-Alignment},
  doi          = {10.1109/tpds.2023.3340518},
  volume       = {35},
  year         = {2023},
}

