@inproceedings{48232,
  author       = {Mirbabaie, Milad and Rieskamp, Jonas and Hofeditz, Lennart and Stieglitz, Stefan},
  title        = {Breaking Down Barriers: How Conversational Agents Facilitate Open Science and Data Sharing},
  year         = {2023},
  internal-note = {booktitle missing (required for @inproceedings) -- supply the proceedings title; ignored field, safe to keep},
}

@inproceedings{48468,
  author       = {Rieskamp, Jonas and Mirbabaie, Milad and Langer, Marie and Kocur, Alexander},
  title        = {From Virality to Veracity: Examining False Information on {Telegram} vs. {Twitter}},
  year         = {2023},
  internal-note = {booktitle missing (required for @inproceedings) -- supply the proceedings title; ignored field, safe to keep},
}

@article{36834,
  abstract     = {Increasing average temperatures and heat waves are having devasting impacts on human health and well-being but studies of heat impacts and how people adapt are rare and often confined to specific locations. In this study, we explore how analysis of conversations on social media can be used to understand how people feel about heat waves and how they respond. We collected global Twitter data over four months (from January to April 2022) using predefined hashtags about heat waves. Topic modelling identified five topics. The largest (one-third of all tweets) was related to sports events. The remaining two-thirds could be allocated to four topics connected to communication about climate-related heat or heat waves. Two of these were on the impacts of heat and heat waves (health impacts 20%; social impacts 16%), one was on extreme weather and climate change attribution (17%) and the last one was on perceptions and warning (13%). The number of tweets in each week corresponded well with major heat wave occurrences in Argentina, Australia, the USA and South Asia (India and Pakistan), indicating that people posting tweets were aware of the threat from heat and its impacts on the society. Among the words frequently used within the topic ‘Social impacts’ were ‘air-conditioning’ and ‘electricity’, suggesting links between coping strategies and financial pressure. Apart from analysing the content of tweets, new insights were also obtained from analysing how people engaged with Twitter tweets about heat or heat waves. We found that tweets posted early, and which were then shared by other influential Twitter users, were among the most popular. Finally, we found that the most popular tweets belonged to individual scientists or respected news outlets, with no evidence that misinformation about climate change-related heat is widespread.},
  author       = {Zander, Kerstin K. and Rieskamp, Jonas and Mirbabaie, Milad and Alazab, Mamoun and Nguyen, Duy},
  issn         = {0921-030X},
  journal      = {Natural Hazards},
  keywords     = {Earth and Planetary Sciences (miscellaneous), Atmospheric Science, Water Science and Technology},
  publisher    = {Springer Science and Business Media LLC},
  title        = {Responses to Heat Waves: What Can {Twitter} Data Tell Us?},
  doi          = {10.1007/s11069-023-05824-2},
  year         = {2023},
}

@inproceedings{33490,
  abstract     = {Algorithmic fairness in Information Systems (IS) is a concept that aims to mitigate systematic discrimination and bias in automated decision-making. However, previous research argued that different fairness criteria are often incompatible. In hiring, AI is used to assess and rank applicants according to their fit for vacant positions. However, various types of bias also exist for AI-based algorithms (e.g., using biased historical data). To reduce AI’s bias and thereby unfair treatment, we conducted a systematic literature review to identify suitable strategies for the context of hiring. We identified nine fundamental articles in this context and extracted four types of approaches to address unfairness in AI, namely pre-process, in-process, post-process, and feature selection. Based on our findings, we (a) derived a research agenda for future studies and (b) proposed strategies for practitioners who design and develop AIs for hiring purposes.},
  author       = {Rieskamp, Jonas and Hofeditz, Lennart and Mirbabaie, Milad and Stieglitz, Stefan},
  booktitle    = {Proceedings of the Annual Hawaii International Conference on System Sciences (HICSS)},
  keywords     = {fairness in AI, SLR, hiring, AI implementation, AI-based algorithms},
  title        = {Approaches to Improve Fairness when Deploying {AI-based} Algorithms in Hiring -- Using a Systematic Literature Review to Guide Future Research},
  year         = {2023},
}

@misc{34025,
  abstract     = {Controversial topics like abortion or capital punishment inherently lack of correct answers or the right way to deal with. Thus, in order to find what is true, what is good, or what should be done, the involved parties need to debate. For the purpose of forming an opinion on a controversial topic someone needs to take in a lot of arguments on that topic to gather information which can be a time-consuming process. To increase efficiency, someone can use an argument search engine to quicken the retrieval of relevant arguments. Although the usage of such a service reduces the time to find arguments, there is still a lot of textual data that needs to be read. To this end, computational summarization approaches for arguments can limit the necessary time for information review by generating short snippets capturing the main gist of each argument. Yet, we suggest that approaches that consider one argument at a time show potential for further improvement in terms of efficiency during information review. In fact, arguments on the same topic, like those retrieved by a search engine for a certain query, partially cover the same content, e. g. arguments regarding the death penalty probably use deterrence as a point in favor of it. However, if the same aspect is central in multiple arguments, their snippets reflect this, which leads to redundancy among the snippets. Consequently, someone interested in gathering information on a controversial topic does not necessarily find new information in each snippet he or she reads.
We introduce the task of Contrastive Argument Summarization (CAS) which addresses the aforementioned problem regarding existing argument summarization. An approach that addresses CAS aims to produce contrastive snippets for each argument in a set of topic-related arguments. A contrastive snippet should represent the main gist of its argument, it should account for the argumentative nature of the text, and it should be dissimilar to the other topic-related arguments in order to reduce redundancy among the snippets.
We propose two approaches addressing CAS, namely an extended version of the LexRank derivation by Alshomary et al. (2020), and an advancement of the work by Bista et al. (2020). Additionally, we develop two automatic measures to assess to which extent the snippets of one set are opposed. For evaluation, we compile a corpus using the args.me search engine Wachsmuth et al. (2017b) to come close to the suggested area of application. Moreover, we conduct a manual annotation study to assess approaches’ effectiveness. We find that the graph-based approach is superior when it comes to contrastiveness (i. e. snippets being dissimilar to topic-related arguments), and that the second approach outperforms the previous one and the unmodified version of Alshomary et al. (2020) when it comes to representativeness (i. e. snippets capturing the main gist of an argument).},
  author       = {Rieskamp, Jonas},
  title        = {Contrastive Argument Summarization Using Supervised and Unsupervised Machine Learning},
  year         = {2022},
  internal-note = {single-author work with thesis-style abstract; likely a thesis -- consider @mastersthesis/@phdthesis with a school field once confirmed; ignored field, safe to keep},
}

@inproceedings{33519,
  author       = {Marx, Julian and Rieskamp, Jonas and Mirbabaie, Milad},
  booktitle    = {Proceedings of the 33rd Australasian Conference on Information Systems},
  venue        = {Melbourne},
  title        = {‘Just a Normal Day in the {Metaverse}’ -- Distraction Conflicts of Knowledge Work in Virtual Environments},
  year         = {2022},
}

@inproceedings{32247,
  author       = {Alshomary, Milad and Rieskamp, Jonas and Wachsmuth, Henning},
  booktitle    = {Proceedings of the 9th International Conference on Computational Models of Argument},
  pages        = {21--31},
  title        = {Generating Contrastive Snippets for Argument Search},
  doi          = {10.3233/FAIA220138},
  year         = {2022},
}

