@inproceedings{63524,
  author       = {Halimeh, Haya and Freese, Florian and M{\"u}ller, Oliver},
  title        = {{LLMs} For Warm and Cold Next-Item Recommendation: A Comparative Study across Zero-Shot Prompting, In-Context Learning and Fine-Tuning},
  booktitle    = {International Conference on Information Systems Development},
  publisher    = {University of Gdansk, Department of Business Informatics \& University of Belgrade, Faculty of Organizational Sciences},
  year         = {2025},
  issn         = {2938-5202},
  doi          = {10.62036/isd.2025.68},
  abstract     = {Recommendation systems are essential for delivering personalized content across e-commerce and streaming services. However, traditional methods often fail in cold-start scenarios where new items lack prior interactions. Recent advances in large language models (LLMs) offer a promising alternative. In this paper, we adopt the retrieve-and-recommend framework and propose to fine-tune the LLM jointly on warm- and cold-start next-item recommendation tasks, thus, mitigating the need for separate models for both item types. We computationally compare zero-shot prompting, in-context learning, and fine-tuning using the same LLM backbone, and benchmark them against strong PLM-based baselines. Our findings provide practical insights into the trade-offs between accuracy and computational cost of these methods for next-item recommendation. To enhance reproducibility, we release the source code under https://github.com/HayaHalimeh/LLMs-For-Next-Item-Recommendation.git.},
}

