@inproceedings{63434,
  author       = {{Hoffmann, Max}},
  booktitle    = {{Proceedings of the Fourteenth Congress of the European Society for Research in Mathematics Education (CERME14)}},
  editor       = {{Bosch, Marianna and Bolondi, Giorgio and Carreira, Susana and Gaidoschik, Michael and Spagnolo, Camilla}},
  keywords     = {{hoffmann, reviewed, proceedings}},
  title        = {{{Using scriptwriting as a response format for interface tasks: Exemplary analyses in the context of symmetry}}},
  year         = {{2025}},
}

@inproceedings{55834,
  author       = {{Page, Kevin R. and Kepper, Johannes and Siegert, Christine and Hankinson, Andrew and Lewis, David}},
  title        = {{{Beethoven in the House: Digital Studies of Domestic Music Arrangements}}},
  booktitle    = {{Music Encoding Conference Proceedings 2021}},
  editor       = {{Münnich, Stefan and Rizo, David}},
  publisher    = {{Humanities Commons}},
  pages        = {{117–123}},
  year         = {{2022}},
  isbn         = {{978-84-13-02173-7}},
  doi          = {{10.17613/389b-xx73}},
  keywords     = {{BitH, mec-proceedings, mec-proceedings-2021}},
  abstract     = {{Performance of music in the home was the means by which most works were received before the advent of audio recordings and broadcasts, yet the notation sources that form our primary record of this culture have not been the subject of comprehensive or methodical study. Choices made by arrangers adapting music for domestic consumption – of instrumentation, abbreviation, or simplification – reflect the musical life of the 19th century, and can inform our understanding alongside contemporary accounts such as newspapers, adverts, and diaries. This position paper gives the background, motivation, and proposed approach of research currently being undertaken within the Beethoven in the House project. This will include a study of Steiner editions of Beethoven’s 7th and 8th Symphonies and Wellingtons Sieg, making a detailed comparison between arrangements, systematically identifying a core common to multiple versions, and asking if this reflects the stated values of the publisher. A second survey will look for patterns across a larger sample of lesser-known and poorly catalogued scores, collating emergent indicators of arrangers’ motivations within a narrative of the domestic market – the music industry of its day. Both studies will innovate digital methods which characterise arrangements as music encodings, including ‘sparse’ approaches to notation and annotation.}},
}

@inproceedings{55841,
  author       = {{Herold, Kristin and Kepper, Johannes and Mo, Ran and Seipelt, Agnes Regina}},
  title        = {{{MusicDiff – A Diff Tool for MEI}}},
  booktitle    = {{Music Encoding Conference Proceedings 2020}},
  editor       = {{De Luca, Elsa and Flanders, Julia}},
  publisher    = {{Humanities Commons}},
  pages        = {{59–66}},
  year         = {{2020}},
  doi          = {{10.17613/ydbv-e158}},
  keywords     = {{mec-proceedings, mec-proceedings-2020}},
  abstract     = {{For musicologists, the collation of multiple sources of the same work is a frequent task. By comparing different witnesses, they seek to identify variation, describe dependencies, and ultimately understand the genesis and transmission of (musical) works. Obviously, the need for such comparison is independent from the medium in which a musical work is manifested. In computing, comparing files for difference is a common task, and the well-known Unix utility diff is almost 46 years old. However, diff, like many other such tools, operates on plain text. While many music encoding formats based on plain text exist, formats used in the field of Digital Humanities are typically based on XML. There are dedicated algorithms for comparing XML as well, but they only focus on the syntax of XML, but not the semantic structures modelled into such standards as MEI. MEI seeks to describe musical structures, and the XML syntax is just a means to express those structures. A diff tool for music should focus on comparing musical structures, but not the specifics of their serialization into a file format. In {Beethovens Werkstatt}, a 16-year project focussed on exploring the concepts and requirements of digital genetic editions of music, based on and arguing with examples from Ludwig van Beethoven, a case-bound diff tool for music was developed. The following paper discusses how that specific tool can be generalized, and which use cases such a tool may support.}},
}

@proceedings{55849,
  abstract     = {{Conference proceedings of the Music Encoding Conferences 2015, 2016 and 2017 with Introduction by Giuliano Di Bacco}},
  editor       = {{Di Bacco, Giuliano and Kepper, Johannes and Roland, Perry D.}},
  keywords     = {{mec-proceedings, mec-proceedings-2016, mec-proceedings-2015, mec-proceedings-2017}},
  publisher    = {{Bavarian State Library (BSB)}},
  title        = {{{Music Encoding Conference Proceedings 2015, 2016 and 2017}}},
  doi          = {{10.15463/music-1}},
  year         = {{2019}},
}

@inproceedings{55837,
  abstract     = {{The Freischütz Digital project (FreiDi) was one of the pioneer projects employing MEI in large scale. It did not only try to encode a huge quantity of music material, it also sought to capture as many aspects of the available sources as possible, effectively creating data of almost unrivaled richness. This paper discusses the outcomes of and experiences made in the FreiDi project.}},
  author       = {{Kepper, Johannes}},
  booktitle    = {{Music Encoding Conference Proceedings 2015, 2016 and 2017}},
  editor       = {{Di Bacco, Giuliano and Kepper, Johannes and Roland, Perry D.}},
  keywords     = {{mec-proceedings, mec-proceedings-2016}},
  pages        = {{95–105}},
  publisher    = {{Bavarian State Library (BSB)}},
  title        = {{{Wie? Was? Entsetzen! Lessons Learned from the Freischütz Digital Project}}},
  doi          = {{10.15463/music-1}},
  year         = {{2019}},
}

@article{29032,
  author       = {{Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille}},
  title        = {{{A Systematic Survey of Point Set Distance Measures for Link Discovery}}},
  journal      = {{Semantic Web Journal}},
  year         = {{2017}},
  keywords     = {{2017 group\_aksw slipo sys:relevantFor:infai sys:relevantFor:bis ngonga simba DICE sherif geo-distance limes}},
  abstract     = {{Large amounts of geo-spatial information have been made available with the growth of the Web of Data. While discovering links between resources on the Web of Data has been shown to be a demanding task, discovering links between geo-spatial resources proves to be even more challenging. This is partly due to the resources being described by the means of vector geometry. Especially, discrepancies in granularity and error measurements across data sets render the selection of appropriate distance measures for geo-spatial resources difficult. In this paper, we survey existing literature for point-set measures that can be used to measure the similarity of vector geometries. We then present and evaluate the ten measures that we derived from literature. We evaluate these measures with respect to their time-efficiency and their robustness against discrepancies in measurement and in granularity. To this end, we use samples of real data sets of different granularity as input for our evaluation framework. The results obtained on three different data sets suggest that most distance approaches can be led to scale. Moreover, while some distance measures are significantly slower than other measures, distance measure based on means, surjections and sums of minimal distances are robust against the different types of discrepancies.}},
}

@inproceedings{29018,
  author       = {{Sherif, Mohamed and Dreßler, Kevin and Ngonga Ngomo, Axel-Cyrille}},
  title        = {{{RADON results for OAEI 2017}}},
  booktitle    = {{Proceedings of Ontology Matching Workshop 2017}},
  year         = {{2017}},
  keywords     = {{2017 dice simba sherif radon ngonga slipo sage geiser hobbit group\_aksw sys:relevantFor:infai sys:relevantFor:bis limes linkinglod sake diesel kevin sys:relevantFor:leds leds}},
}

@inproceedings{29024,
  abstract     = {{A significant portion of the evolution of Linked Data datasets lies in updating the links to other datasets. An important challenge when aiming to update these links automatically under the open-world assumption is the fact that usually only positive examples for the links exist. We address this challenge by presenting and evaluating WOMBAT , a novel approach for the discovery of links between knowledge bases that relies exclusively on positive examples. WOMBAT is based on generalisation via an upward refinement operator to traverse the space of link specification. We study the theoretical characteristics of WOMBAT and evaluate it on 8 different benchmark datasets. Our evaluation suggests that WOMBAT outperforms state-of-the-art supervised approaches while relying on less information. Moreover, our evaluation suggests that WOMBAT ’s pruning algorithm allows it to scale well even on large datasets.}},
  author       = {{Sherif, Mohamed and Ngonga Ngomo, Axel-Cyrille and Lehmann, Jens}},
  booktitle    = {{14th Extended Semantic Web Conference, Portorož, Slovenia, 28th May - 1st June 2017}},
  keywords     = {{2017 group\_aksw sys:relevantFor:geoknow sys:relevantFor:infai sys:relevantFor:bis ngonga simba dice sherif geoknow wombat lehmann MOLE}},
  publisher    = {{Springer}},
  title        = {{{WOMBAT - A Generalization Approach for Automatic Link Discovery}}},
  year         = {{2017}},
}

