@comment{
  Cleaned-up export: removed double-braced field values (which broke BibTeX
  name parsing for author/editor lists), protected acronyms/proper nouns in
  titles, escaped the literal & in publisher names, moved the arXiv id of
  entry 46117 out of booktitle into note/eprint, and split "Springer, Cham."
  into publisher/address. Citation keys are unchanged. File assumes a
  UTF-8-capable toolchain (biber) for the accented names.
}

@article{46100,
  author    = {Hinrichs, Benjamin and Janssen, Daan W. and Ziebell, Jobst},
  title     = {{Super-Gaussian} decay of exponentials: A sufficient condition},
  journal   = {Journal of Mathematical Analysis and Applications},
  volume    = {528},
  number    = {1},
  year      = {2023},
  publisher = {Elsevier BV},
  issn      = {0022-247X},
  keywords  = {Applied Mathematics, Analysis},
  doi       = {10.1016/j.jmaa.2023.127558},
}

@unpublished{46117,
  author        = {Weich, Tobias and Wolf, Lasse L.},
  title         = {Temperedness of locally symmetric spaces: The product case},
  year          = {2023},
  note          = {Preprint, arXiv:2304.09573},
  eprint        = {2304.09573},
  archiveprefix = {arXiv},
  abstract      = {Let $X=X_1\times X_2$ be a product of two rank one symmetric spaces of non-compact type and $\Gamma$ a torsion-free discrete subgroup in $G_1\times G_2$. We show that the spectrum of $\Gamma \backslash X$ is related to the asymptotic growth of $\Gamma$ in the two direction defined by the two factors. We obtain that $L^2(\Gamma \backslash G)$ is tempered for large class of $\Gamma$.},
}

@article{46155,
  author    = {Bruns, Julia and Hagena, Maike and Gasteiger, Hedwig},
  title     = {Professional Development Enacted by Facilitators in the Context of Early Mathematics Education: Scaling up or Dilution of Effects?},
  journal   = {Teaching and Teacher Education},
  volume    = {132},
  year      = {2023},
  publisher = {Elsevier BV},
  issn      = {0742-051X},
  keywords  = {Education},
  doi       = {10.1016/j.tate.2023.104270},
}

@book{46157,
  editor    = {Biehler, Rolf and Liebendörfer, Michael and Gueudet, Ghislaine and Rasmussen, Chris and Winsløw, Carl},
  title     = {Practice-Oriented Research in Tertiary Mathematics Education},
  publisher = {Springer International Publishing},
  year      = {2023},
  isbn      = {9783031141744},
  issn      = {1869-4918},
  doi       = {10.1007/978-3-031-14175-1},
}

@article{46256,
  author    = {Ma, Yulai and Mattiolo, Davide and Steffen, Eckhard and Wolf, Isaak Hieronymus},
  title     = {Pairwise Disjoint Perfect Matchings in {$r$}-Edge-Connected {$r$}-Regular Graphs},
  journal   = {SIAM Journal on Discrete Mathematics},
  volume    = {37},
  number    = {3},
  pages     = {1548--1565},
  year      = {2023},
  publisher = {Society for Industrial \& Applied Mathematics (SIAM)},
  issn      = {0895-4801},
  keywords  = {General Mathematics},
  doi       = {10.1137/22m1500654},
}

@inproceedings{42163,
  author    = {Offen, Christian and Ober-Blöbaum, Sina},
  title     = {Learning discrete {Lagrangians} for variational {PDEs} from data and detection of travelling waves},
  booktitle = {Geometric Science of Information},
  editor    = {Nielsen, F. and Barbaresco, F.},
  volume    = {14071},
  pages     = {569--579},
  year      = {2023},
  publisher = {Springer},
  address   = {Cham},
  location  = {Saint-Malo, Palais du Grand Large, France},
  keywords  = {System identification, discrete Lagrangians, travelling waves},
  doi       = {10.1007/978-3-031-38271-0_57},
  abstract  = {The article shows how to learn models of dynamical systems from data which are governed by an unknown variational PDE. Rather than employing reduction techniques, we learn a discrete field theory governed by a discrete Lagrangian density $L_d$ that is modelled as a neural network. Careful regularisation of the loss function for training $L_d$ is necessary to obtain a field theory that is suitable for numerical computations: we derive a regularisation term which optimises the solvability of the discrete Euler--Lagrange equations. Secondly, we develop a method to find solutions to machine learned discrete field theories which constitute travelling waves of the underlying continuous PDE.},
}

@article{29240,
  author    = {Ober-Blöbaum, Sina and Offen, Christian},
  title     = {Variational Learning of {Euler--Lagrange} Dynamics from Data},
  journal   = {Journal of Computational and Applied Mathematics},
  volume    = {421},
  pages     = {114780},
  year      = {2023},
  publisher = {Elsevier},
  issn      = {0377-0427},
  keywords  = {Lagrangian learning, variational backward error analysis, modified Lagrangian, variational integrators, physics informed learning},
  doi       = {10.1016/j.cam.2022.114780},
  abstract  = {The principle of least action is one of the most fundamental physical principle. It says that among all possible motions connecting two points in a phase space, the system will exhibit those motions which extremise an action functional. Many qualitative features of dynamical systems, such as the presence of conservation laws and energy balance equations, are related to the existence of an action functional. Incorporating variational structure into learning algorithms for dynamical systems is, therefore, crucial in order to make sure that the learned model shares important features with the exact physical system. In this paper we show how to incorporate variational principles into trajectory predictions of learned dynamical systems. The novelty of this work is that (1) our technique relies only on discrete position data of observed trajectories. Velocities or conjugate momenta do not need to be observed or approximated and no prior knowledge about the form of the variational principle is assumed. Instead, they are recovered using backward error analysis. (2) Moreover, our technique compensates discretisation errors when trajectories are computed from the learned system. This is important when moderate to large step-sizes are used and high accuracy is required. For this, we introduce and rigorously analyse the concept of inverse modified Lagrangians by developing an inverse version of variational backward error analysis. (3) Finally, we introduce a method to perform system identification from position observations only, based on variational backward error analysis.},
}

@article{29236,
  author    = {McLachlan, Robert and Offen, Christian},
  title     = {Backward error analysis for conjugate symplectic methods},
  journal   = {Journal of Geometric Mechanics},
  volume    = {15},
  number    = {1},
  pages     = {98--115},
  year      = {2023},
  publisher = {AIMS Press},
  keywords  = {variational integrators, backward error analysis, Euler--Lagrange equations, multistep methods, conjugate symplectic methods},
  doi       = {10.3934/jgm.2023005},
  abstract  = {The numerical solution of an ordinary differential equation can be interpreted as the exact solution of a nearby modified equation. Investigating the behaviour of numerical solutions by analysing the modified equation is known as backward error analysis. If the original and modified equation share structural properties, then the exact and approximate solution share geometric features such as the existence of conserved quantities. Conjugate symplectic methods preserve a modified symplectic form and a modified Hamiltonian when applied to a Hamiltonian system. We show how a blended version of variational and symplectic techniques can be used to compute modified symplectic and Hamiltonian structures. In contrast to other approaches, our backward error analysis method does not rely on an ansatz but computes the structures systematically, provided that a variational formulation of the method is known. The technique is illustrated on the example of symmetric linear multistep methods with matrix coefficients.},
}

@article{37654,
  author    = {Dierkes, Eva and Offen, Christian and Ober-Blöbaum, Sina and Flaßkamp, Kathrin},
  title     = {{Hamiltonian} Neural Networks with Automatic Symmetry Detection},
  journal   = {Chaos},
  volume    = {33},
  number    = {6},
  year      = {2023},
  publisher = {AIP Publishing},
  issn      = {1054-1500},
  doi       = {10.1063/5.0142969},
  abstract  = {Recently, Hamiltonian neural networks (HNN) have been introduced to incorporate prior physical knowledge when learning the dynamical equations of Hamiltonian systems. Hereby, the symplectic system structure is preserved despite the data-driven modeling approach. However, preserving symmetries requires additional attention. In this research, we enhance the HNN with a Lie algebra framework to detect and embed symmetries in the neural network. This approach allows to simultaneously learn the symmetry group action and the total energy of the system. As illustrating examples, a pendulum on a cart and a two-body problem from astrodynamics are considered.},
}

@article{23428,
  author   = {Nüske, Feliks and Peitz, Sebastian and Philipp, Friedrich and Schaller, Manuel and Worthmann, Karl},
  title    = {Finite-data error bounds for {Koopman}-based prediction and control},
  journal  = {Journal of Nonlinear Science},
  volume   = {33},
  year     = {2023},
  doi      = {10.1007/s00332-022-09862-1},
  abstract = {The Koopman operator has become an essential tool for data-driven approximation of dynamical (control) systems in recent years, e.g., via extended dynamic mode decomposition. Despite its popularity, convergence results and, in particular, error bounds are still quite scarce. In this paper, we derive probabilistic bounds for the approximation error and the prediction error depending on the number of training data points; for both ordinary and stochastic differential equations. Moreover, we extend our analysis to nonlinear control-affine systems using either ergodic trajectories or i.i.d. samples. Here, we exploit the linearity of the Koopman generator to obtain a bilinear system and, thus, circumvent the curse of dimensionality since we do not autonomize the system by augmenting the state by the control inputs. To the best of our knowledge, this is the first finite-data error analysis in the stochastic and/or control setting. Finally, we demonstrate the effectiveness of the proposed approach by comparing it with state-of-the-art techniques showing its superiority whenever state and control are coupled.},
}