@article{57472,
  abstract     = {{In this paper we introduce, in a Hilbert space setting, a second order dynamical system with asymptotically vanishing damping and vanishing Tikhonov regularization that approaches a multiobjective optimization problem with convex and differentiable components of the objective function. Trajectory solutions are shown to exist in finite dimensions. We prove fast convergence of the function values, quantified in terms of a merit function. Based on the regime considered, we establish both weak and, in some cases, strong convergence of trajectory solutions toward a weak Pareto optimal solution. To achieve this, we apply Tikhonov regularization individually to each component of the objective function. This work extends results from single objective convex optimization into the multiobjective setting.}},
  author       = {{Bot, Radu Ioan and Sonntag, Konstantin}},
  journal      = {{Journal of Mathematical Analysis and Applications}},
  keywords     = {{Pareto optimization, Lyapunov analysis, gradient-like dynamical systems, inertial dynamics, asymptotic vanishing damping, Tikhonov regularization, strong convergence}},
  title        = {{{Inertial dynamics with vanishing Tikhonov regularization for multiobjective optimization}}},
  year         = {{2025}},
}

@phdthesis{62750,
  abstract     = {{Diese Dissertation enthält Beiträge zum Bereich der Mehrzieloptimierung mit einem Fokus auf unbeschränkten Problemen, die auf einem allgemeinen Hilbertraum definiert sind. Für Mehrzieloptimierungsprobleme mit lokal Lipschitz-stetigen Zielfunktionen definieren wir ein multikriterielles Subdifferential, das wir erstmals im Kontext allgemeiner Hilberträume analysieren. Aufbauend auf diesen theoretischen Untersuchungen präsentieren wir ein Abstiegsverfahren, bei welchem in jeder Iteration eine Abstiegsrichtung mittels einer numerischen Approximation des multikriteriellen Subdifferentials bestimmt wird. Im Kontext konvexer, stetig differenzierbarer Zielfunktionen mit Lipschitz-stetigen Gradienten, führen wir eine Familie von dynamischen Gradientensystemen mit Trägheitsterm ein, die bekannte kontinuierliche Systeme aus der skalaren Optimierung verallgemeinern. Wir stellen drei neue Systeme vor: eines mit konstanter Dämpfung, eines mit asymptotisch abnehmender Dämpfung und eines, das zusätzlich eine zeitabhängige Tikhonov-Regularisierung beinhaltet. Aufbauend auf den Untersuchungen der neuen dynamischen Gradientensysteme, entwickeln wir ein beschleunigtes Gradientenverfahren zur Mehrzieloptimierung, das auf einer Diskretisierung des multikriteriellen Gradientensystems mit asymptotisch abnehmender Dämpfung beruht. Das hergeleitete Verfahren bewahrt die günstigen Konvergenzeigenschaften des kontinuierlichen Systems und erreicht eine schnellere Konvergenz als klassische Verfahren.}},
  author       = {{Sonntag, Konstantin}},
  school       = {{Paderborn University}},
  title        = {{{First-order methods and gradient dynamical systems for multiobjective optimization}}},
  doi          = {{10.17619/UNIPB/1-2457}},
  year         = {{2025}},
}

@article{20731,
  author       = {{Bieker, Katharina and Gebken, Bennet and Peitz, Sebastian}},
  title        = {{{On the Treatment of Optimization Problems with L1 Penalty Terms via Multiobjective Continuation}}},
  journal      = {{IEEE Transactions on Pattern Analysis and Machine Intelligence}},
  volume       = {{44}},
  number       = {{11}},
  pages        = {{7797--7808}},
  publisher    = {{IEEE}},
  doi          = {{10.1109/TPAMI.2021.3114962}},
  year         = {{2022}},
  abstract     = {{We present a novel algorithm that allows us to gain detailed insight into the effects of sparsity in linear and nonlinear optimization, which is of great importance in many scientific areas such as image and signal processing, medical imaging, compressed sensing, and machine learning (e.g., for the training of neural networks). Sparsity is an important feature to ensure robustness against noisy data, but also to find models that are interpretable and easy to analyze due to the small number of relevant terms. It is common practice to enforce sparsity by adding the ℓ1-norm as a weighted penalty term. In order to gain a better understanding and to allow for an informed model selection, we directly solve the corresponding multiobjective optimization problem (MOP) that arises when we minimize the main objective and the ℓ1-norm simultaneously. As this MOP is in general non-convex for nonlinear objectives, the weighting method will fail to provide all optimal compromises. To avoid this issue, we present a continuation method which is specifically tailored to MOPs with two objective functions one of which is the ℓ1-norm. Our method can be seen as a generalization of well-known homotopy methods for linear regression problems to the nonlinear case. Several numerical examples - including neural network training - demonstrate our theoretical findings and the additional insight that can be gained by this multiobjective approach.}},
}

