@article{27426,
  abstract     = {Regularization is used in many different areas of optimization when solutions
are sought which not only minimize a given function, but also possess a certain
degree of regularity. Popular applications are image denoising, sparse
regression and machine learning. Since the choice of the regularization
parameter is crucial but often difficult, path-following methods are used to
approximate the entire regularization path, i.e., the set of all possible
solutions for all regularization parameters. Due to their nature, the
development of these methods requires structural results about the
regularization path. The goal of this article is to derive these results for
the case of a smooth objective function which is penalized by a piecewise
differentiable regularization term. We do this by treating regularization as a
multiobjective optimization problem. Our results suggest that even in this
general case, the regularization path is piecewise smooth. Moreover, our theory
allows for a classification of the nonsmooth features that occur in between
smooth parts. This is demonstrated in two applications, namely support-vector
machines and exact penalty methods.},
  author       = {Gebken, Bennet and Bieker, Katharina and Peitz, Sebastian},
  journal      = {Journal of Global Optimization},
  number       = {3},
  pages        = {709--741},
  title        = {On the Structure of Regularization Paths for Piecewise Differentiable Regularization Terms},
  doi          = {10.1007/s10898-022-01223-2},
  volume       = {85},
  year         = {2023},
}

@inproceedings{30125,
  abstract     = {We present an approach for guaranteed constraint satisfaction by means of data-based optimal control, where the model is unknown and has to be obtained from measurement data. To this end, we utilize the Koopman framework and an eDMD-based bilinear surrogate modeling approach for control systems to show an error bound on predicted observables, i.e., functions of the state. This result is then applied to the constraints of the optimal control problem to show that satisfaction of tightened constraints in the purely data-based surrogate model implies constraint satisfaction for the original system.},
  author       = {Schaller, Manuel and Worthmann, Karl and Philipp, Friedrich and Peitz, Sebastian and Nüske, Feliks},
  booktitle    = {{IFAC-PapersOnLine}},
  number       = {1},
  pages        = {169--174},
  title        = {Towards Reliable Data-Based Optimal and Predictive Control Using Extended {DMD}},
  doi          = {10.1016/j.ifacol.2023.02.029},
  volume       = {56},
  year         = {2023},
}

@unpublished{46579,
  abstract     = {The Koopman operator has become an essential tool for data-driven analysis, prediction and control of complex systems, the main reason being the enormous potential of identifying linear function space representations of nonlinear
dynamics from measurements. Until now, the situation where for large-scale systems, we (i) only have access to partial observations (i.e., measurements, as is very common for experimental data) or (ii) deliberately perform coarse
graining (for efficiency reasons) has not been treated to its full extent. In this paper, we address the pitfall associated with this situation, that the classical EDMD algorithm does not automatically provide a Koopman operator approximation for the underlying system if we do not carefully select the number of observables. Moreover, we show that symmetries in the system dynamics can be carried over to the Koopman operator, which allows us to massively increase the model efficiency. We also briefly draw a connection to domain decomposition techniques for partial differential equations and present numerical evidence using the Kuramoto--Sivashinsky equation.},
  author       = {Peitz, Sebastian and Harder, Hans and Nüske, Feliks and Philipp, Friedrich and Schaller, Manuel and Worthmann, Karl},
  eprint       = {2307.15325},
  archiveprefix = {arXiv},
  note         = {arXiv:2307.15325},
  title        = {Partial Observations, Coarse Graining and Equivariance in {Koopman} Operator Theory for Large-Scale Dynamical Systems},
  year         = {2023},
}

@article{23428,
  abstract     = {The Koopman operator has become an essential tool for data-driven approximation of dynamical (control) systems in recent years, e.g., via extended dynamic mode decomposition. Despite its popularity, convergence results and, in particular, error bounds are still quite scarce. In this paper, we derive probabilistic bounds for the approximation error and the prediction error depending on the number of training data points; for both ordinary and stochastic differential equations. Moreover, we extend our analysis to nonlinear control-affine systems using either ergodic trajectories or i.i.d.
samples. Here, we exploit the linearity of the Koopman generator to obtain a bilinear system and, thus, circumvent the curse of dimensionality since we do not autonomize the system by augmenting the state by the control inputs. To the
best of our knowledge, this is the first finite-data error analysis in the stochastic and/or control setting. Finally, we demonstrate the effectiveness of the proposed approach by comparing it with state-of-the-art techniques showing its superiority whenever state and control are coupled.},
  author       = {Nüske, Feliks and Peitz, Sebastian and Philipp, Friedrich and Schaller, Manuel and Worthmann, Karl},
  journal      = {Journal of Nonlinear Science},
  title        = {Finite-Data Error Bounds for {Koopman}-Based Prediction and Control},
  doi          = {10.1007/s00332-022-09862-1},
  volume       = {33},
  year         = {2023},
}

@article{21600,
  abstract     = {Many problems in science and engineering require an efficient numerical approximation of integrals or solutions to differential equations. For systems with rapidly changing dynamics, an equidistant discretization is often inadvisable as it results in prohibitively large errors or computational effort. To this end, adaptive schemes, such as solvers based on Runge–Kutta pairs, have been developed which adapt the step size based on local error estimations at each step. While the classical schemes apply very generally and are highly efficient on regular systems, they can behave suboptimally when an inefficient step rejection mechanism is triggered by structurally complex systems such as chaotic systems. To overcome these issues, we propose a method to tailor numerical schemes to the problem class at hand. This is achieved by combining simple, classical quadrature rules or ODE solvers with data-driven time-stepping controllers. Compared with learning solution operators to ODEs directly, it generalizes better to unseen initial data as our approach employs classical numerical schemes as base methods. At the same time it can make use of identified structures of a problem class and, therefore, outperforms state-of-the-art adaptive schemes. Several examples demonstrate superior efficiency. Source code is available at https://github.com/lueckem/quadrature-ML.},
  author       = {Dellnitz, Michael and Hüllermeier, Eyke and Lücke, Marvin and Ober-Blöbaum, Sina and Offen, Christian and Peitz, Sebastian and Pfannschmidt, Karlson},
  journal      = {SIAM Journal on Scientific Computing},
  number       = {2},
  pages        = {A579--A595},
  title        = {Efficient Time Stepping for Numerical Integration Using Reinforcement Learning},
  doi          = {10.1137/21M1412682},
  volume       = {45},
  year         = {2023},
}

@article{46784,
  author       = {Wallscheid, Oliver and Peitz, Sebastian and Stenner, Jan and Weber, Daniel and Boshoff, Septimus and Meyer, Marvin and Chidananda, Vikas and Schweins, Oliver},
  issn         = {2475-9066},
  journal      = {Journal of Open Source Software},
  keywords     = {General Earth and Planetary Sciences, General Environmental Science},
  number       = {89},
  publisher    = {The Open Journal},
  title        = {{ElectricGrid.jl} -- A {Julia}-Based Modeling and Simulation Tool for Power Electronics-Driven Electric Energy Grids},
  doi          = {10.21105/joss.05616},
  volume       = {8},
  year         = {2023},
}

@inproceedings{46813,
  abstract     = {Modelling of dynamic systems plays an important role in many engineering disciplines. Two different approaches are physical modelling and data‐driven modelling, both of which have their respective advantages and disadvantages. By combining these two approaches, hybrid models can be created in which the respective disadvantages are mitigated, with discrepancy models being a particular subclass. Here, the basic system behaviour is described physically, that is, in the form of differential equations. Inaccuracies resulting from insufficient modelling or numerics lead to a discrepancy between the measurements and the model, which can be compensated by a data‐driven error correction term. Since discrepancy methods still require a large amount of measurement data, this paper investigates the extent to which a single discrepancy model can be trained for a physical model with additional parameter dependencies without the need for retraining. As an example, a damped electromagnetic oscillating circuit is used. The physical model is realised by a differential equation describing the electric current, considering only inductance and capacitance; dissipation due to resistance is neglected. This creates a discrepancy between measurement and model, which is corrected by a data‐driven model. In the experiments, the inductance and the capacity are varied. It is found that the same data‐driven model can only be used if additional parametric dependencies in the data‐driven term are considered as well.},
  author       = {Wohlleben, Meike Claudia and Muth, Lars and Peitz, Sebastian and Sextro, Walter},
  booktitle    = {Proceedings in Applied Mathematics and Mechanics},
  issn         = {1617-7061},
  keywords     = {Electrical and Electronic Engineering, Atomic and Molecular Physics, and Optics},
  publisher    = {Wiley},
  title        = {Transferability of a Discrepancy Model for the Dynamics of Electromagnetic Oscillating Circuits},
  doi          = {10.1002/pamm.202300039},
  year         = {2023},
}

@incollection{16296,
  abstract     = {Multiobjective optimization plays an increasingly important role in modern
applications, where several objectives are often of equal importance. The task
in multiobjective optimization and multiobjective optimal control is therefore
to compute the set of optimal compromises (the Pareto set) between the
conflicting objectives. Since the Pareto set generally consists of an infinite
number of solutions, the computational effort can quickly become challenging
which is particularly problematic when the objectives are costly to evaluate as
is the case for models governed by partial differential equations (PDEs). To
decrease the numerical effort to an affordable amount, surrogate models can be
used to replace the expensive PDE evaluations. Existing multiobjective
optimization methods using model reduction are limited either to low parameter
dimensions or to few (ideally two) objectives. In this article, we present a
combination of the reduced basis model reduction method with a continuation
approach using inexact gradients. The resulting approach can handle an
arbitrary number of objectives while yielding a significant reduction in
computing time.},
  author       = {Banholzer, Stefan and Gebken, Bennet and Dellnitz, Michael and Peitz, Sebastian and Volkwein, Stefan},
  booktitle    = {Non-Smooth and Complementarity-Based Distributed Parameter Systems},
  editor       = {Hintermüller, Michael and Herzog, Roland and Kanzow, Christian and Ulbrich, Michael and Ulbrich, Stefan},
  isbn         = {978-3-030-79392-0},
  pages        = {43--76},
  publisher    = {Springer},
  title        = {{ROM}-Based Multiobjective Optimization of Elliptic {PDEs} via Numerical Continuation},
  doi          = {10.1007/978-3-030-79393-7_3},
  year         = {2022},
}

@incollection{30294,
  abstract     = {With the ever increasing capabilities of sensors and controllers, autonomous driving is quickly becoming a reality. This disruptive change in the automotive industry poses major challenges for manufacturers as well as suppliers as entirely new design and testing strategies have to be developed to remain competitive. Most importantly, the complexity of autonomously driving vehicles in a complex, uncertain, and safety-critical environment requires new testing procedures to cover the almost infinite range of potential scenarios.},
  author       = {Peitz, Sebastian and Dellnitz, Michael and Bannenberg, Sebastian},
  booktitle    = {German Success Stories in Industrial Mathematics},
  editor       = {Bock, H. G. and Küfer, K.-H. and Maas, P. and Milde, A. and Schulz, V.},
  isbn         = {978-3-030-81454-0},
  issn         = {1612-3956},
  publisher    = {Springer International Publishing},
  title        = {Efficient Virtual Design and Testing of Autonomous Vehicles},
  doi          = {10.1007/978-3-030-81455-7_23},
  volume       = {35},
  year         = {2022},
}

@article{29673,
  abstract     = {Koopman operator theory has been successfully applied to problems from various research areas such as fluid dynamics, molecular dynamics, climate science, engineering, and biology. Applications include detecting metastable or coherent sets, coarse-graining, system identification, and control. There is an intricate connection between dynamical systems driven by stochastic differential equations and quantum mechanics. In this paper, we compare the ground-state transformation and Nelson's stochastic mechanics and demonstrate how data-driven methods developed for the approximation of the Koopman operator can be used to analyze quantum physics problems. Moreover, we exploit the relationship between Schrödinger operators and stochastic control problems to show that modern data-driven methods for stochastic control can be used to solve the stationary or imaginary-time Schrödinger equation. Our findings open up a new avenue towards solving Schrödinger's equation using recently developed tools from data science.},
  author       = {Klus, Stefan and Nüske, Feliks and Peitz, Sebastian},
  journal      = {Journal of Physics A: Mathematical and Theoretical},
  number       = {31},
  pages        = {314002},
  publisher    = {IOP Publishing Ltd.},
  title        = {{Koopman} Analysis of Quantum Systems},
  doi          = {10.1088/1751-8121/ac7d22},
  volume       = {55},
  year         = {2022},
}

@unpublished{33150,
  abstract     = {In this article, we build on previous work to present an optimization algorithm for nonlinearly constrained multi-objective optimization problems. The algorithm combines a surrogate-assisted derivative-free trust-region approach with the filter method known from single-objective optimization. Instead of the true objective and constraint functions, so-called fully linear models are employed and we show how to deal with the gradient inexactness in the composite step setting, adapted from single-objective optimization as well. Under standard assumptions, we prove convergence of a subset of iterates to a quasi-stationary point and if constraint qualifications hold, then the limit point is also a KKT-point of the multi-objective problem.},
  author       = {Berkemeier, Manuel Bastian and Peitz, Sebastian},
  eprint       = {2208.12094},
  archiveprefix = {arXiv},
  note         = {arXiv:2208.12094},
  title        = {Multi-Objective Trust-Region Filter Method for Nonlinear Constraints Using Inexact Gradients},
  year         = {2022},
}

@article{20731,
  abstract     = {We present a novel algorithm that allows us to gain detailed insight into the effects of sparsity in linear and nonlinear optimization, which is of great importance in many scientific areas such as image and signal processing, medical imaging, compressed sensing, and machine learning (e.g., for the training of neural networks). Sparsity is an important feature to ensure robustness against noisy data, but also to find models that are interpretable and easy to analyze due to the small number of relevant terms. It is common practice to enforce sparsity by adding the $\ell_1$-norm as a weighted penalty term. In order to gain a better understanding and to allow for an informed model selection, we directly solve the corresponding multiobjective optimization problem (MOP) that arises when we minimize the main objective and the $\ell_1$-norm simultaneously. As this MOP is in general non-convex for nonlinear objectives, the weighting method will fail to provide all optimal compromises. To avoid this issue, we present a continuation method which is specifically tailored to MOPs with two objective functions one of which is the $\ell_1$-norm. Our method can be seen as a generalization of well-known homotopy methods for linear regression problems to the nonlinear case. Several numerical examples - including neural network training - demonstrate our theoretical findings and the additional insight that can be gained by this multiobjective approach.},
  author       = {Bieker, Katharina and Gebken, Bennet and Peitz, Sebastian},
  journal      = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  number       = {11},
  pages        = {7797--7808},
  publisher    = {IEEE},
  title        = {On the Treatment of Optimization Problems with {L1} Penalty Terms via Multiobjective Continuation},
  doi          = {10.1109/TPAMI.2021.3114962},
  volume       = {44},
  year         = {2022},
}

@incollection{29727,
  author       = {Wohlleben, Meike Claudia and Bender, Amelie and Peitz, Sebastian and Sextro, Walter},
  booktitle    = {Machine Learning, Optimization, and Data Science},
  isbn         = {978-3-030-95469-7},
  issn         = {0302-9743},
  publisher    = {Springer International Publishing},
  title        = {Development of a Hybrid Modeling Methodology for Oscillating Systems with Friction},
  doi          = {10.1007/978-3-030-95470-3_8},
  year         = {2022},
}

@article{21337,
  abstract     = {We present a flexible trust region descend algorithm for unconstrained and
convexly constrained multiobjective optimization problems. It is targeted at
heterogeneous and expensive problems, i.e., problems that have at least one
objective function that is computationally expensive. The method is
derivative-free in the sense that neither need derivative information be
available for the expensive objectives nor are gradients approximated using
repeated function evaluations as is the case in finite-difference methods.
Instead, a multiobjective trust region approach is used that works similarly to
its well-known scalar pendants. Local surrogate models constructed from
evaluation data of the true objective functions are employed to compute
possible descent directions. In contrast to existing multiobjective trust
region algorithms, these surrogates are not polynomial but carefully
constructed radial basis function networks. This has the important advantage
that the number of data points scales linearly with the parameter space
dimension. The local models qualify as fully linear and the corresponding
general scalar framework is adapted for problems with multiple objectives.
Convergence to Pareto critical points is proven and numerical examples
illustrate our findings.},
  author       = {Berkemeier, Manuel Bastian and Peitz, Sebastian},
  issn         = {2297-8747},
  journal      = {Mathematical and Computational Applications},
  number       = {2},
  title        = {Derivative-Free Multiobjective Trust Region Descent Method Using Radial Basis Function Surrogate Models},
  doi          = {10.3390/mca26020031},
  volume       = {26},
  year         = {2021},
}

@article{16867,
  abstract     = {In this article, we present an efficient descent method for locally Lipschitz
continuous multiobjective optimization problems (MOPs). The method is realized
by combining a theoretical result regarding the computation of descent
directions for nonsmooth MOPs with a practical method to approximate the
subdifferentials of the objective functions. We show convergence to points
which satisfy a necessary condition for Pareto optimality. Using a set of test
problems, we compare our method to the multiobjective proximal bundle method by
M{\"a}kel{\"a}. The results indicate that our method is competitive while being
easier to implement. While the number of objective function evaluations is
larger, the overall number of subgradient evaluations is lower. Finally, we
show that our method can be combined with a subdivision algorithm to compute
entire Pareto sets of nonsmooth MOPs.},
  author       = {Gebken, Bennet and Peitz, Sebastian},
  journal      = {Journal of Optimization Theory and Applications},
  pages        = {696--723},
  title        = {An Efficient Descent Method for Locally {Lipschitz} Multiobjective Optimization Problems},
  doi          = {10.1007/s10957-020-01803-w},
  volume       = {188},
  year         = {2021},
}

@article{16295,
  abstract     = {It is a challenging task to identify the objectives on which a certain decision was based, in particular if several, potentially conflicting criteria are equally important and a continuous set of optimal compromise decisions exists. This task can be understood as the inverse problem of multiobjective optimization, where the goal is to find the objective function vector of a given Pareto set. To this end, we present a method to construct the objective function vector of an unconstrained multiobjective optimization problem (MOP) such that the Pareto critical set contains a given set of data points with prescribed KKT multipliers. If such an MOP can not be found, then the method instead produces an MOP whose Pareto critical set is at least close to the data points. The key idea is to consider the objective function vector in the multiobjective KKT conditions as variable and then search for the objectives that minimize the Euclidean norm of the resulting system of equations. By expressing the objectives in a finite-dimensional basis, we transform this problem into a homogeneous, linear system of equations that can be solved efficiently. Potential applications of this approach include the identification of objectives (both from clean and noisy data) and the construction of surrogate models for expensive MOPs.},
  author       = {Gebken, Bennet and Peitz, Sebastian},
  journal      = {Journal of Global Optimization},
  pages        = {3--29},
  publisher    = {Springer},
  title        = {Inverse Multiobjective Optimization: Inferring Decision Criteria from Data},
  doi          = {10.1007/s10898-020-00983-z},
  volume       = {80},
  year         = {2021},
}

@article{16294,
  abstract     = {Model predictive control is a prominent approach to construct a feedback
control loop for dynamical systems. Due to real-time constraints, the major
challenge in MPC is to solve model-based optimal control problems in a very
short amount of time. For linear-quadratic problems, Bemporad et al. have
proposed an explicit formulation where the underlying optimization problems are
solved a priori in an offline phase. In this article, we present an extension
of this concept in two significant ways. We consider nonlinear problems and -
more importantly - problems with multiple conflicting objective functions. In
the offline phase, we build a library of Pareto optimal solutions from which we
then obtain a valid compromise solution in the online phase according to a
decision maker's preference. Since the standard multi-parametric programming
approach is no longer valid in this situation, we instead use interpolation
between different entries of the library. To reduce the number of problems that
have to be solved in the offline phase, we exploit symmetries in the dynamical
system and the corresponding multiobjective optimal control problem. The
results are verified using two different examples from autonomous driving.},
  author       = {Ober-Blöbaum, Sina and Peitz, Sebastian},
  journal      = {International Journal of Robust and Nonlinear Control},
  number       = {2},
  pages        = {380--403},
  title        = {Explicit Multiobjective Model Predictive Control for Nonlinear Systems with Symmetries},
  doi          = {10.1002/rnc.5281},
  volume       = {31},
  year         = {2021},
}

@incollection{17411,
  abstract     = {Many dynamical systems possess symmetries, e.g. rotational and translational invariances of mechanical systems. These can be beneficially exploited in the design of numerical optimal control methods. We present a model predictive control scheme which is based on a library of precomputed motion primitives. The primitives are equivalence classes w.r.t. the symmetry of the optimal control problems. Trim primitives as relative equilibria w.r.t. this symmetry, play a crucial role in the algorithm. The approach is illustrated using an academic mobile robot example.},
  author       = {Flaßkamp, Kathrin and Ober-Blöbaum, Sina and Peitz, Sebastian},
  booktitle    = {Advances in Dynamics, Optimization and Computation},
  editor       = {Junge, Oliver and Schütze, Oliver and Froyland, Gary and Ober-Blöbaum, Sina and Padberg-Gehle, Kathrin},
  isbn         = {978-3-030-51263-7},
  issn         = {2198-4182},
  publisher    = {Springer},
  title        = {Symmetry in Optimal Control: A Multiobjective Model Predictive Control Approach},
  doi          = {10.1007/978-3-030-51264-4_9},
  year         = {2020},
}

@article{10596,
  abstract     = {Multi-objective optimization is an active field of research that has many applications. Owing to its success and because decision-making processes are becoming more and more complex, there is a recent trend for incorporating many objectives into such problems. The challenge with such problems, however, is that the dimensions of the solution sets—the so-called Pareto sets and fronts—grow with the number of objectives. It is thus no longer possible to compute or to approximate the entire solution set of a given problem that contains many (e.g. more than three) objectives. On the other hand, the computation of single solutions (e.g. via scalarization methods) leads to unsatisfying results in many cases, even if user preferences are incorporated. In this article, the Pareto Explorer tool is presented—a global/local exploration tool for the treatment of many-objective optimization problems (MaOPs). In the first step, a solution of the problem is computed via a global search algorithm that ideally already includes user preferences. In the second step, a local search along the Pareto set/front of the given MaOP is performed in user specified directions. For this, several continuation-like procedures are proposed that can incorporate preferences defined in decision, objective, or in weight space. The applicability and usefulness of Pareto Explorer is demonstrated on benchmark problems as well as on an application from industrial laundry design.},
  author       = {Schütze, Oliver and Cuate, Oliver and Martín, Adanay and Peitz, Sebastian and Dellnitz, Michael},
  issn         = {0305-215X},
  journal      = {Engineering Optimization},
  number       = {5},
  pages        = {832--855},
  title        = {{Pareto Explorer}: A Global/Local Exploration Tool for Many-Objective Optimization Problems},
  doi          = {10.1080/0305215X.2019.1617286},
  volume       = {52},
  year         = {2020},
}

@article{16288,
  abstract     = {We derive a data-driven method for the approximation of the Koopman generator called gEDMD, which can be regarded as a straightforward extension of EDMD (extended dynamic mode decomposition). This approach is applicable to deterministic and stochastic dynamical systems. It can be used for computing eigenvalues, eigenfunctions, and modes of the generator and for system identification. In addition to learning the governing equations of deterministic systems, which then reduces to SINDy (sparse identification of nonlinear dynamics), it is possible to identify the drift and diffusion terms of stochastic differential equations from data. Moreover, we apply gEDMD to derive coarse-grained models of high-dimensional systems, and also to determine efficient model predictive control strategies. We highlight relationships with other methods and demonstrate the efficacy of the proposed methods using several guiding examples and prototypical molecular dynamics problems.},
  author       = {Klus, Stefan and Nüske, Feliks and Peitz, Sebastian and Niemann, Jan-Hendrik and Clementi, Cecilia and Schütte, Christof},
  issn         = {0167-2789},
  journal      = {Physica D: Nonlinear Phenomena},
  title        = {Data-Driven Approximation of the {Koopman} Generator: Model Reduction, System Identification, and Control},
  doi          = {10.1016/j.physd.2020.132416},
  volume       = {406},
  year         = {2020},
}

