@inproceedings{63434,
  author       = {{Hoffmann, Max}},
  booktitle    = {{Proceedings of the Fourteenth Congress of the European Society for Research in Mathematics Education (CERME14)}},
  editor       = {{Bosch, Marianna and Bolondi, Giorgio and Carreira, Susana and Gaidoschik, Michael and Spagnolo, Camilla}},
  keywords     = {{hoffmann, reviewed, proceedings}},
  title        = {{{Using scriptwriting as a response format for interface tasks: Exemplary analyses in the context of symmetry}}},
  year         = {{2025}},
}

@inproceedings{55834,
  abstract     = {{Performance of music in the home was the means by which most works were received before the advent of audio recordings and broadcasts, yet the notation sources that form our primary record of this culture have not been the subject of comprehensive or methodical study. Choices made by arrangers adapting music for domestic consumption – of instrumentation, abbreviation, or simplification – reflect the musical life of the 19th century, and can inform our understanding alongside contemporary accounts such as newspapers, adverts, and diaries. This position paper gives the background, motivation, and proposed approach of research currently being undertaken within the Beethoven in the House project. This will include a study of Steiner editions of Beethoven’s 7th and 8th Symphonies and Wellingtons Sieg, making a detailed comparison between arrangements, systematically identifying a core common to multiple versions, and asking if this reflects the stated values of the publisher. A second survey will look for patterns across a larger sample of lesser-known and poorly catalogued scores, collating emergent indicators of arrangers’ motivations within a narrative of the domestic market – the music industry of its day. Both studies will innovate digital methods which characterise arrangements as music encodings, including ‘sparse’ approaches to notation and annotation.}},
  author       = {{Page, Kevin R. and Kepper, Johannes and Siegert, Christine and Hankinson, Andrew and Lewis, David}},
  booktitle    = {{Music Encoding Conference Proceedings 2021}},
  editor       = {{Münnich, Stefan and Rizo, David}},
  isbn         = {{978-84-13-02173-7}},
  keywords     = {{BitH, mec-proceedings, mec-proceedings-2021}},
  pages        = {{117--123}},
  publisher    = {{Humanities Commons}},
  title        = {{{Beethoven in the House: Digital Studies of Domestic Music Arrangements}}},
  doi          = {{10.17613/389b-xx73}},
  year         = {{2022}},
}

@inproceedings{55841,
  abstract     = {{For musicologists, the collation of multiple sources of the same work is a frequent task. By comparing different witnesses, they seek to identify variation, describe dependencies, and ultimately understand the genesis and transmission of (musical) works. Obviously, the need for such comparison is independent from the medium in which a musical work is manifested. In computing, comparing files for difference is a common task, and the well-known Unix utility diff is almost 46 years old. However, diff, like many other such tools, operates on plain text. While many music encoding formats based on plain text exist, formats used in the field of Digital Humanities are typically based on XML. There are dedicated algorithms for comparing XML as well, but they only focus on the syntax of XML, but not the semantic structures modelled into such standards as MEI. MEI seeks to describe musical structures, and the XML syntax is just a means to express those structures. A diff tool for music should focus on comparing musical structures, but not the specifics of their serialization into a file format. In {Beethovens Werkstatt}, a 16-year project focussed on exploring the concepts and requirements of digital genetic editions of music, based on and arguing with examples from Ludwig van Beethoven, a case-bound diff tool for music was developed. The following paper discusses how that specific tool can be generalized, and which use cases such a tool may support.}},
  author       = {{Herold, Kristin and Kepper, Johannes and Mo, Ran and Seipelt, Agnes Regina}},
  booktitle    = {{Music Encoding Conference Proceedings 2020}},
  editor       = {{De Luca, Elsa and Flanders, Julia}},
  keywords     = {{mec-proceedings, mec-proceedings-2020}},
  pages        = {{59--66}},
  publisher    = {{Humanities Commons}},
  title        = {{{MusicDiff – A Diff Tool for MEI}}},
  doi          = {{10.17613/ydbv-e158}},
  year         = {{2020}},
}

@inproceedings{29009,
  abstract     = {{With the growth in number and variety of RDF datasets comes an increasing need for both scalable and accurate solutions to support link discovery at instance level within and across these datasets. In contrast to ontology matching, most linking frameworks rely solely on string similarities to this end. The limited use of semantic similarities when linking instances is partly due to the current literature stating that they (1) do not improve the F-measure of instance linking approaches and (2) are impractical to use because they lack time efficiency. We revisit the combination of string and semantic similarities for linking instances. Contrary to the literature, our results suggest that this combination can improve the F-measure achieved by instance linking systems when the combination of the measures is performed by a machine learning approach. To achieve this insight, we had to address the scalability of semantic similarities. We hence present a framework for the rapid computation of semantic similarities based on edge counting. This runtime improvement allowed us to run an evaluation of 5 benchmark datasets. Our results suggest that combining string and semantic similarities can improve the F-measure by up to 6% absolute.}},
  author       = {{Georgala, Kleanthi and Röder, Michael and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of Ontology Matching Workshop 2020}},
  keywords     = {{2020 dice simba sherif hecate ngonga knowgraphs sys:relevantFor:limboproject limboproject sys:relevantFor:infai sys:relevantFor:bis limes limbo opal roeder georgala}},
  title        = {{{Applying edge-counting semantic similarities to Link Discovery: Scalability and Accuracy}}},
  year         = {{2020}},
}

@inproceedings{29010,
  abstract     = {{Link discovery plays a key role in the integration and use of data across RDF knowledge graphs. Active learning approaches are a common family of solutions to address the problem of learning how to compute links from users. So far, only active learning from perfect oracles has been considered in the literature. However, real oracles are often far from perfect (e.g., in crowdsourcing). We hence study the problem of learning how to compute links across knowledge graphs from noisy oracles, i.e., oracles that are not guaranteed to return correct classification results. We present a novel approach for link discovery based on a probabilistic model, with which we estimate the joint odds of the oracles’ guesses. We combine this approach with an iterative learning approach based on refinements. The resulting method, Ligon, is evaluated on 10 benchmark datasets. Our results suggest that Ligon configured with 10 iterations and 10 training examples per iteration achieves more than 95% of the F-measure achieved by state-of-the-art algorithms trained with a perfect oracle. Moreover, Ligon outperforms batch learning approaches devised to be trained with small amounts of training data by more than 40% F-measure on average.}},
  author       = {{Sherif, Mohamed and Dreßler, Kevin and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of Ontology Matching Workshop 2020}},
  keywords     = {{2020 dice simba sherif ligon ngonga knowgraphs sys:relevantFor:limboproject limboproject sys:relevantFor:infai sys:relevantFor:bis limes limbo opal kevin}},
  title        = {{{LIGON – Link Discovery with Noisy Oracles}}},
  year         = {{2020}},
}

@inproceedings{29040,
  author       = {{Zahera, Hamada Mohamed Abdelsamee and Sherif, Mohamed}},
  title        = {{{ProBERT: Product Data Classification with Fine-tuning BERT Model}}},
  booktitle    = {{Proceedings of Mining the Web of HTML-embedded Product Data Workshop (MWPD2020)}},
  year         = {{2020}},
  keywords     = {{2020 dice zahera sherif knowgraphs sys:relevantFor:limboproject limboproject sys:relevantFor:infai sys:relevantFor:bis limes limbo opal}},
}

@inproceedings{29007,
  abstract     = {{Modern data-driven frameworks often have to process large amounts of data periodically. Hence, they often operate under time or space constraints. This also holds for Linked Data-driven frameworks when processing RDF data, in particular, when they perform link discovery tasks. In this work, we present a novel approach for link discovery under constraints pertaining to the expected recall of a link discovery task. Given a link specification, the approach aims to find a subsumed link specification that achieves a lower run time than the input specification while abiding by a predefined constraint on the expected recall it has to achieve. Our approach, dubbed LIGER, combines downward refinement operators with monotonicity assumptions to detect such specifications. We evaluate our approach on seven datasets. Our results suggest that the different implementations of LIGER can detect subsumed specifications that abide by expected recall constraints efficiently, thus leading to significantly shorter overall run times than our baseline.}},
  author       = {{Georgala, Kleanthi and Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  booktitle    = {{Proceedings of Ontology Matching Workshop 2020}},
  keywords     = {{2020 dice simba sherif hecate ngonga knowgraphs sys:relevantFor:limboproject limboproject sys:relevantFor:infai sys:relevantFor:bis limes limbo opal georgala}},
  title        = {{{LIGER – Link Discovery with Partial Recall}}},
  year         = {{2020}},
}

@proceedings{55849,
  abstract     = {{Conference proceedings of the Music Encoding Conferences 2015, 2016 and 2017 with Introduction by Giuliano Di Bacco}},
  editor       = {{Di Bacco, Giuliano and Kepper, Johannes and Roland, Perry D.}},
  keywords     = {{mec-proceedings, mec-proceedings-2016, mec-proceedings-2015, mec-proceedings-2017}},
  publisher    = {{Bavarian State Library (BSB)}},
  title        = {{{Music Encoding Conference Proceedings 2015, 2016 and 2017}}},
  doi          = {{10.15463/music-1}},
  year         = {{2019}},
}

@inproceedings{55837,
  abstract     = {{The Freischütz Digital project (FreiDi) was one of the pioneer projects employing MEI in large scale. It did not only try to encode a huge quantity of music material, it also sought to capture as many aspects of the available sources as possible, effectively creating data of almost unrivaled richness. This paper discusses the outcomes of and experiences made in the FreiDi project.}},
  author       = {{Kepper, Johannes}},
  booktitle    = {{Music Encoding Conference Proceedings 2015, 2016 and 2017}},
  editor       = {{Di Bacco, Giuliano and Kepper, Johannes and Roland, Perry D.}},
  keywords     = {{mec-proceedings, mec-proceedings-2016}},
  pages        = {{95--105}},
  publisher    = {{Bavarian State Library (BSB)}},
  title        = {{{Wie? Was? Entsetzen! Lessons Learned from the Freischütz Digital Project}}},
  doi          = {{10.15463/music-1}},
  year         = {{2019}},
}

