@article{58472,
  abstract     = {{The “kill chain”—involving the analysis of data by human users of military technologies, the understanding of that data, and human decisions—has fast been replaced by the “kill cloud”, which necessitates, enables, and exacerbates an increased thirst for domination, violence against distant populations, and a culture of experimentation with human lives. This commentary reports on an interdisciplinary discussion organised by the Disruption Network Lab that brought together whistleblowers, artists, and experts investigating the impact of artificial intelligence and other emerging technologies on networked warfare. The discussion exposed the problematics of networked warfare and the kill cloud: their colonial overtones, their effects on human subjects in real life, their erroneous scientific rationalities, and the (business) practices and logics that enable this algorithmic machinery of violence. The conference took place from 29 November to 1 December 2024 at the Kunstquartier Bethanien in Berlin, Germany.}},
  author       = {{Bhila, Ishmael}},
  issn         = {{2662-1975}},
  journal      = {{Digital War}},
  keywords     = {{autonomous weapons systems, algorithmic warfare, cloud computing, war on terror}},
  number       = {{4}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Investigating the kill cloud: information warfare, autonomous weapons \& AI}}},
  doi          = {{10.1057/s42984-025-00101-x}},
  volume       = {{6}},
  year         = {{2025}},
}

@article{56279,
  abstract     = {{Biases in artificial intelligence have been flagged in academic and policy literature for years. Autonomous weapons systems—defined as weapons that use sensors and algorithms to select, track, target, and engage targets without human intervention—have the potential to mirror systems of societal inequality which reproduce algorithmic bias. This article argues that the problem of engrained algorithmic bias poses a greater challenge to autonomous weapons systems developers than most other risks discussed in the Group of Governmental Experts on Lethal Autonomous Weapons Systems (GGE on LAWS), and that this should be reflected in the outcome documents of these discussions. This is mainly because it takes longer to rectify a discriminatory algorithm than it does to issue an apology for a mistake that occurs occasionally. Highly militarised states have controlled both the discussions and their outcomes, which have focused on issues that are pertinent to them while ignoring what is existential for the rest of the world. Various calls from civil society, researchers, and smaller states for a legally binding instrument to regulate the development and use of autonomous weapons systems have always included the call for recognising algorithmic bias in autonomous weapons, which has not been reflected in discussion outcomes. This article argues that any ethical framework developed for the regulation of autonomous weapons systems should, in detail, ensure that the development and use of autonomous weapons systems do not discriminate against vulnerable sections of (global) society.}},
  author       = {{Bhila, Ishmael}},
  issn         = {{2662-1975}},
  journal      = {{Digital War}},
  publisher    = {{Springer Science and Business Media LLC}},
  title        = {{{Putting algorithmic bias on top of the agenda in the discussions on autonomous weapons systems}}},
  doi          = {{10.1057/s42984-024-00094-z}},
  year         = {{2024}},
}