@incollection{61237,
  author       = {Steinhardt, Isabel and Röwert, Ronny},
  title        = {Open {Science}},
  booktitle    = {Hochschulforschung},
  editor       = {Pasternack, Peer and Reinmann, Gabi and Schneijderberg, Christian},
  publisher    = {Nomos},
  year         = {2025},
  pages        = {487--496},
  isbn         = {9783748943334},
  doi          = {10.5771/9783748943334-487},
  keywords     = {Open Data, Open Access, Open Source, Open Methodology, Open Peer Review},
  abstract     = {In diesem Beitrag wird zunächst die historische Entstehung von Open Science kurz skizziert und definiert, was unter diesem Begriff zu verstehen ist. Daran anschließend werden die Open-Science-Praktiken Open Data, Open Access, Open Source, Open Methodology und Open Peer Review dargestellt und diskutiert, welche Forschungserkenntnisse zu Open Science vorhanden sind. Im Schluss werden Forschungsdesiderate aufgegriffen und die Implikationen von Open Science für die Wissenschaft erläutert.},
}

@inproceedings{30236,
  author       = {Schneider, Stefan Balthasar and Werner, Stefan and Khalili, Ramin and Hecker, Artur and Karl, Holger},
  title        = {{mobile-env}: An Open Platform for Reinforcement Learning in Wireless Mobile Networks},
  booktitle    = {{IEEE/IFIP} Network Operations and Management Symposium ({NOMS})},
  publisher    = {IEEE},
  location     = {Budapest},
  year         = {2022},
  keywords     = {wireless mobile networks, network management, continuous control, cognitive networks, autonomous coordination, reinforcement learning, gym environment, simulation, open source},
  abstract     = {Recent reinforcement learning approaches for continuous control in wireless mobile networks have shown impressive results. But due to the lack of open and compatible simulators, authors typically create their own simulation environments for training and evaluation. This is cumbersome and time-consuming for authors and limits reproducibility and comparability, ultimately impeding progress in the field. To this end, we propose mobile-env, a simple and open platform for training, evaluating, and comparing reinforcement learning and conventional approaches for continuous control in mobile wireless networks. mobile-env is lightweight and implements the common OpenAI Gym interface and additional wrappers, which allows connecting virtually any single-agent or multi-agent reinforcement learning framework to the environment. While mobile-env provides sensible default values and can be used out of the box, it also has many configuration options and is easy to extend. We therefore believe mobile-env to be a valuable platform for driving meaningful progress in autonomous coordination of wireless mobile networks.},
}

@inproceedings{5625,
  author       = {Schryen, Guido and Kadura, Rouven},
  title        = {Open Source vs.\ Closed Source Software: Towards Measuring Security},
  booktitle    = {24th Annual {ACM} Symposium on Applied Computing},
  year         = {2009},
  keywords     = {Open source software, Closed source software, Security, Metrics},
  abstract     = {The increasing availability and deployment of open source software in personal and commercial environments makes open source software highly appealing for hackers, and others who are interested in exploiting software vulnerabilities. This deployment has resulted in a debate ``full of religion'' on the security of open source software compared to that of closed source software. However, beyond such arguments, only little quantitative analysis on this research issue has taken place. We discuss the state-of-the-art of the security debate and identify shortcomings. Based on these, we propose new metrics, which allows to answer the question to what extent the review process of open source and closed source development has helped to fix vulnerabilities. We illustrate the application of some of these metrics in a case study on OpenOffice (open source software) vs.\ Microsoft Office (closed source software).},
}

@inproceedings{5647,
  author       = {Schryen, Guido},
  title        = {Security of Open Source and Closed Source Software: An Empirical Comparison of Published Vulnerabilities},
  booktitle    = {15th Americas Conference on Information Systems},
  year         = {2009},
  keywords     = {Vulnerabilities, security, open source software, closed source software, empirical comparison},
  abstract     = {Reviewing literature on open source and closed source security reveals that the discussion is often determined by biased attitudes toward one of these development styles. The discussion specifically lacks appropriate metrics, methodology and hard data. This paper contributes to solving this problem by analyzing and comparing published vulnerabilities of eight open source software and nine closed source software packages, all of which are widely deployed. Thereby, it provides an extensive empirical analysis of vulnerabilities in terms of mean time between vulnerability disclosures, the development of disclosure over time, and the severity of vulnerabilities, and allows for validating models provided in the literature. The investigation reveals that (a) the mean time between vulnerability disclosures was lower for open source software in half of the cases, while the other cases show no differences, (b) in contrast to literature assumption, 14 out of 17 software packages showed a significant linear or piecewise linear correlation between time and the number of published vulnerabilities, and (c) regarding the severity of vulnerabilities, no significant differences were found between open source and closed source.},
}

