@inproceedings{56983,
  author       = {Qudus, Umair and Röder, Michael and Vollmers, Daniel and Ngonga Ngomo, Axel-Cyrille},
  title        = {{ExPrompt}: Augmenting Prompts Using Examples as Modern Baseline for Stance Classification},
  booktitle    = {Proceedings of the 33rd {ACM} International Conference on Information and Knowledge Management},
  location     = {Boise, ID, USA},
  publisher    = {ACM},
  pages        = {3994--3999},
  volume       = {9},
  year         = {2024},
  isbn         = {979-8-4007-0436-9},
  doi          = {10.1145/3627673.3679923},
  keywords     = {Stance Classification, Few-shot in-context learning, Pre-trained large language models},
  abstract     = {Detecting the veracity of a statement automatically is a challenge the world is grappling with due to the vast amount of data spread across the web. Verifying a given claim typically entails validating it within the framework of supporting evidence like a retrieved piece of text. Classifying the stance of the text with respect to the claim is called stance classification. Despite advancements in automated fact-checking, most systems still rely on a substantial quantity of labeled training data, which can be costly. In this work, we avoid the costly training or fine-tuning of models by reusing pre-trained large language models together with few-shot in-context learning. Since we do not train any model, our approach ExPrompt is lightweight, demands fewer resources than other stance classification methods and can serve as a modern baseline for future developments. At the same time, our evaluation shows that our approach is able to outperform former state-of-the-art stance classification approaches regarding accuracy by at least 2 percent. Our scripts and data used in this paper are available at https://github.com/dice-group/ExPrompt.},
}

@inproceedings{57240,
  author       = {Qudus, Umair and Röder, Michael and Tatkeu Pekarou, Franck Lionel and Morim da Silva, Ana Alexandra and Ngonga Ngomo, Axel-Cyrille},
  title        = {{FaVEL}: Fact Validation Ensemble Learning},
  booktitle    = {{EKAW} 2024},
  editor       = {Rospocher, Marco},
  location     = {Amsterdam, Netherlands},
  year         = {2024},
  keywords     = {fact checking, ensemble learning, transfer learning, knowledge management},
  abstract     = {Validating assertions before adding them to a knowledge graph is an essential part of its creation and maintenance. Due to the sheer size of knowledge graphs, automatic fact-checking approaches have been developed. These approaches rely on reference knowledge to decide whether a given assertion is correct. Recent hybrid approaches achieve good results by including several knowledge sources. However, it is often impractical to provide a sheer quantity of textual knowledge or generate embedding models to leverage these hybrid approaches. We present FaVEL, an approach that uses algorithm selection and ensemble learning to amalgamate several existing fact-checking approaches that rely solely on a reference knowledge graph and, hence, use fewer resources than current hybrid approaches. For our evaluation, we create updated versions of two existing datasets and a new dataset dubbed FaVEL-DS. Our evaluation compares our approach to 15 fact-checking approaches—including the state-of-the-art approach HybridFC—on 3 datasets. Our results demonstrate that FaVEL outperforms all other approaches significantly by at least 0.04 in terms of the area under the ROC curve. Our source code, datasets, and evaluation results are open-source and can be found at https://github.com/dice-group/favel.},
}

