@unpublished{NazaikinskiiSavinSchulzeetal.2002, author = {Nazaikinskii, Vladimir and Savin, Anton and Schulze, Bert-Wolfgang and Sternin, Boris}, title = {Elliptic theory on manifolds with nonisolated singularities : III. The spectral flow of families of conormal symbols}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26386}, year = {2002}, abstract = {When studying elliptic operators on manifolds with nonisolated singularities one naturally encounters families of conormal symbols (i.e. operators elliptic with parameter p ∈ ℝ in the sense of Agranovich-Vishik) parametrized by the set of singular points. For homotopies of such families we define the notion of spectral flow, which in this case is an element of the K-group of the parameter space. We prove that the spectral flow is equal to the index of some family of operators on the infinite cone.}, language = {en} } @unpublished{HarutjunjanSchulze2002, author = {Harutjunjan, G. and Schulze, Bert-Wolfgang}, title = {Reduction of orders in boundary value problems without the transmission property}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26220}, year = {2002}, abstract = {Given an algebra of pseudo-differential operators on a manifold, an elliptic element is said to be a reduction of orders if it induces isomorphisms of Sobolev spaces with a corresponding shift of smoothness. Reductions of orders on a manifold with boundary refer to boundary value problems. We consider smooth symbols and ellipticity without additional boundary conditions, which is the relevant case on a manifold with boundary. Starting from a class of symbols that has been investigated before for integer orders in boundary value problems with the transmission property, we study operators of arbitrary real orders that play a similar role for operators without the transmission property. Moreover, we show that order-reducing symbols have the Volterra property and are parabolic of anisotropy 1; analogous relations are formulated for arbitrary anisotropies. We finally investigate parameter-dependent operators, apply a kernel cut-off construction with respect to the parameter and show that corresponding holomorphic operator-valued Mellin symbols reduce orders in weighted Sobolev spaces on a cone with boundary.}, language = {en} } @unpublished{NazaikinskiiSavinSchulzeetal.2002, author = {Nazaikinskii, Vladimir and Savin, Anton and Schulze, Bert-Wolfgang and Sternin, Boris}, title = {Elliptic theory on manifolds with nonisolated singularities : IV. Obstructions to elliptic problems on manifolds with edges}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26415}, year = {2002}, abstract = {The obstruction to the existence of Fredholm problems for elliptic differential operators on manifolds with edges is a topological invariant of the operator. We give an explicit general formula for this invariant. As an application we compute this obstruction for geometric operators.}, language = {en} } @unpublished{NazaikinskiiSavinSchulzeetal.2002, author = {Nazaikinskii, Vladimir and Savin, Anton and Schulze, Bert-Wolfgang and Sternin, Boris}, title = {Elliptic theory on manifolds with nonisolated singularities : II.
Products in elliptic theory on manifolds with edges}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26335}, year = {2002}, abstract = {Exterior tensor products of elliptic operators on smooth manifolds and manifolds with conical singularities are used to obtain examples of elliptic operators on manifolds with edges that do not admit well-posed edge boundary and coboundary conditions.}, language = {en} } @unpublished{NazaikinskiiSavinSchulzeetal.2003, author = {Nazaikinskii, Vladimir and Savin, Anton and Schulze, Bert-Wolfgang and Sternin, Boris}, title = {Differential operators on manifolds with singularities : analysis and topology : Chapter 5: Manifolds with isolated singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26659}, year = {2003}, abstract = {Contents: Chapter 5: Manifolds with Isolated Singularities 5.1. Differential Operators and the Geometry of Singularities 5.1.1. How do isolated singularities arise? Examples 5.1.2. Definition and methods for the description of manifolds with isolated singularities 5.1.3. Bundles. The cotangent bundle 5.2. Asymptotics of Solutions, Function Spaces, Conormal Symbols 5.2.1. Conical singularities 5.2.2. Cuspidal singularities 5.3. A Universal Representation of Degenerate Operators and the Finiteness Theorem 5.3.1. The cylindrical representation 5.3.2. Continuity and compactness 5.3.3. Ellipticity and the finiteness theorem 5.4. Calculus of ΨDO 5.4.1. General ΨDO 5.4.2. The subalgebra of stabilizing ΨDO 5.4.3. Ellipticity and the finiteness theorem}, language = {en} } @unpublished{DinesSchulze2003, author = {Dines, Nicoleta and Schulze, Bert-Wolfgang}, title = {Mellin-edge representations of elliptic operators}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26627}, year = {2003}, abstract = {We construct a class of elliptic operators in the edge algebra on a manifold M with an embedded submanifold Y interpreted as an edge. The ellipticity refers to a principal symbolic structure consisting of the standard interior symbol and an operator-valued edge symbol. Given a differential operator A on M, for every (sufficiently large) s we construct an associated operator As in the edge calculus. We show that ellipticity of A in the usual sense entails ellipticity of As as an edge operator (up to a discrete set of reals s). Parametrices P of A then correspond to parametrices Ps of As, interpreted as Mellin-edge representations of P.}, language = {en} } @unpublished{KrainerSchulze2004, author = {Krainer, Thomas and Schulze, Bert-Wolfgang}, title = {The conormal symbolic structure of corner boundary value problems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26662}, year = {2004}, abstract = {Ellipticity of operators on manifolds with conical singularities or parabolicity on space-time cylinders is known to be linked to parameter-dependent operators (conormal symbols) on a corresponding base manifold. We introduce the conormal symbolic structure for the case of corner manifolds, where the base itself is a manifold with edges and boundary. The specific nature of parameter-dependence requires a systematic approach in terms of meromorphic functions with values in edge-boundary value problems.
We develop here a corresponding calculus, and we construct inverses of elliptic elements.}, language = {en} } @unpublished{NazaikinskiiSavinSchulzeetal.2004, author = {Nazaikinskii, Vladimir and Savin, Anton and Schulze, Bert-Wolfgang and Sternin, Boris}, title = {Differential operators on manifolds with singularities : analysis and topology : Chapter 7: The index problem on manifolds with singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26700}, year = {2004}, abstract = {Contents: Chapter 7: The Index Problem on Manifolds with Singularities Preface 7.1. The Simplest Index Formulas 7.1.1. General properties of the index 7.1.2. The index of invariant operators on the cylinder 7.1.3. Relative index formulas 7.1.4. The index of general operators on the cylinder 7.1.5. The index of operators of the form 1 + G with a Green operator G 7.1.6. The index of operators of the form 1 + G on manifolds with edges 7.1.7. The index on bundles with smooth base and fiber having conical points 7.2. The Index Problem for Manifolds with Isolated Singularities 7.2.1. Statement of the index splitting problem 7.2.2. The obstruction to the index splitting 7.2.3. Computation of the obstruction in topological terms 7.2.4. Examples. Operators with symmetries 7.3. The Index Problem for Manifolds with Edges 7.3.1. The index excision property 7.3.2. The obstruction to the index splitting 7.4. Bibliographical Remarks}, language = {en} } @unpublished{HarutjunjanSchulze2004, author = {Harutjunjan, Gohar and Schulze, Bert-Wolfgang}, title = {Boundary problems with meromorphic symbols in cylindrical domains}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-26735}, year = {2004}, abstract = {We show relative index formulas for boundary value problems in cylindrical domains and Sobolev spaces with different weights at ±∞. The amplitude functions are meromorphic in the axial covariable and take values in the space of boundary value problems on the cross section of the cylinder.}, language = {en} } @phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In the last decades, there has been notable progress in solving the well-known Boolean satisfiability (Sat) problem, which can be witnessed by powerful Sat solvers. One of the reasons why these solvers are so fast is the structural properties of instances that are utilized by the solvers' internals. This thesis deals with the well-studied structural property treewidth, which measures the closeness of an instance to being a tree. In fact, many problems are solvable in polynomial time in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG), which allows us to precisely monitor the treewidth when reducing from one problem to another.
This new reduction type will be the basis for a long-open lower bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth. Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems. It turns out that our resulting solver is quite competitive for two canonical counting problems related to Sat.}, language = {en} } @phdthesis{Seuring2000, author = {Seuring, Markus}, title = {Output space compaction for testing and concurrent checking}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000165}, school = {Universit{\"a}t Potsdam}, year = {2000}, abstract = {The dissertation describes new design methods for compactors for the outputs of digital circuits that drastically reduce the number of outputs to be tested while degrading the testability of the circuits only slightly or not at all. The first part of the thesis treats, for combinational circuits, methods that take the structure of the circuits into account when designing the compactors. Several algorithms for the analysis of circuit structures are presented and investigated for the first time. The complexity of the presented procedures for generating compactors is linear in the number of gates of the circuit, so they are applicable to very large circuits. In the second part, such a procedure is described for sequential circuits for the first time; it essentially builds on the first one. The third part describes a design method that requires no information about the internal structure of the circuit or about the underlying fault model. The design is based solely on a given set of test vectors and the corresponding test responses of the fault-free circuit. A compactor generated by this method masks none of the faults that are observable at the outputs of the circuit when testing with the given vectors.}, language = {en} } @phdthesis{Knoechel2019, author = {Kn{\"o}chel, Jane}, title = {Model reduction of mechanism-based pharmacodynamic models and its link to classical drug effect models}, doi = {10.25932/publishup-44059}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440598}, school = {Universit{\"a}t Potsdam}, pages = {vii, 147}, year = {2019}, abstract = {Continuous insight into biological processes has led to the development of large-scale, mechanistic systems biology models of pharmacologically relevant networks. While these models are typically designed to study the impact of diverse stimuli or perturbations on multiple system variables, the focus in pharmacological research is often on a specific input, e.g., the dose of a drug, and a specific output related to the drug effect or response in terms of some surrogate marker. To study a chosen input-output pair, the complexity of the interactions as well as the size of the models hinders easy access and understanding of the details of the input-output relationship.
The objective of this thesis is the development of a mathematical approach, specifically a model reduction technique, that allows one (i) to quantify the importance of the different state variables for a given input-output relationship, and (ii) to reduce the dynamics to its essential features, allowing for a physiological interpretation of state variables as well as parameter estimation in the statistical analysis of clinical data. We develop a model reduction technique in a control-theoretic setting by first defining a novel type of time-limited controllability and observability gramians for nonlinear systems. We then show the superiority of the time-limited generalised gramians for nonlinear systems in the context of balanced truncation for a benchmark system from control theory. The concept of time-limited controllability and observability gramians is subsequently used to introduce a state- and time-dependent quantity called the input-response (ir) index that quantifies the importance of state variables for a given input-response relationship at a particular time. We subsequently link our approach to sensitivity analysis, thus enabling for the first time the use of sensitivity coefficients for state-space reduction. The sensitivity-based ir-indices are given as a product of two sensitivity coefficients. This allows not only for a computationally more efficient calculation but also for a clear distinction of the extent to which the input impacts a state variable and the extent to which a state variable impacts the output. The ir-indices give insight into the coordinated action of specific state variables for a chosen input-response relationship. Our model reduction technique results in reduced models that still allow for a mechanistic interpretation in terms of the quantities/state variables of the original system, which is a key requirement in the field of systems pharmacology and systems biology and distinguishes the reduced models from so-called empirical drug effect models. The ir-indices are explicitly defined with respect to a reference trajectory and are thereby dependent on the initial state (this is an important feature of the measure). This is demonstrated for an example from the field of systems pharmacology, showing that the reduced models are very informative in their ability to detect (genetic) deficiencies in certain physiological entities. Comparing our novel model reduction technique to existing techniques shows its superiority. The novel input-response index as a measure of the importance of state variables provides a powerful tool for understanding the complex dynamics of large-scale systems in the context of a specific drug-response relationship.
Furthermore, the indices provide a means for a very efficient model order reduction and, thus, an important step towards translating insight from biological processes incorporated in detailed systems pharmacology models into the population analysis of clinical data.}, language = {en} } @phdthesis{Solms2017, author = {Solms, Alexander Maximilian}, title = {Integrating nonlinear mixed effects and physiologically-based modeling approaches for the analysis of repeated measurement studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397070}, school = {Universit{\"a}t Potsdam}, pages = {x, 141}, year = {2017}, abstract = {During the drug discovery \& development process, several phases encompassing a number of preclinical and clinical studies have to be successfully passed to demonstrate safety and efficacy of a new drug candidate. As part of these studies, the characterization of the drug's pharmacokinetics (PK) is an important aspect, since the PK is assumed to strongly impact safety and efficacy. To this end, drug concentrations are measured repeatedly over time in a study population. The objectives of such studies are to describe the typical PK time-course and the associated variability between subjects. Furthermore, underlying sources significantly contributing to this variability, e.g. the use of comedication, should be identified. The most commonly used statistical framework to analyse repeated measurement data is the nonlinear mixed effects (NLME) approach. At the same time, ample knowledge about the drug's properties already exists and has been accumulating during the discovery \& development process: before any drug is tested in humans, detailed knowledge about the PK in different animal species has to be collected. This drug-specific knowledge and general knowledge about the species' physiology is exploited in mechanistic physiologically based PK (PBPK) modeling approaches; it is, however, ignored in the classical NLME modeling approach. Mechanistic physiologically based models aim to incorporate relevant and known physiological processes which contribute to the overarching process of interest. In comparison to data-driven models they are usually more complex from a mathematical perspective. For example, in many situations, the number of model parameters outnumbers the number of measurements, and thus reliable parameter estimation becomes more complex and partly impossible. As a consequence, the integration of powerful mathematical estimation approaches like the NLME modeling approach, which is widely used in data-driven modeling, and the mechanistic modeling approach is not well established; the observed data is rather used as a confirming input instead of one that informs and drives model building. A further obstacle to an integrated approach is the inaccessibility of the details of the NLME methodology, which would be needed to adapt these approaches to the specifics and needs of mechanistic modeling. Despite the fact that the NLME modeling approach has existed for several decades, the details of the mathematical methodology are scattered across a wide range of literature, and a comprehensive, rigorous derivation is lacking. Available literature usually only covers selected parts of the mathematical methodology. Sometimes, important steps are not described or are only heuristically motivated, e.g. the iterative algorithm to finally determine the parameter estimates.
Thus, in the present thesis the mathematical methodology of NLME modeling is systematically described and complemented into a comprehensive account, covering the common theme from ideas and motivation to the final parameter estimation. Therein, new insights into the interpretation of different approximation methods used in the context of the NLME modeling approach are given and illustrated; furthermore, similarities and differences between them are outlined. Based on these findings, an expectation-maximization (EM) algorithm to determine estimates of an NLME model is described. Using the EM algorithm and the lumping methodology of Pilari (2010), a new approach on how PBPK and NLME modeling can be combined is presented and exemplified for the antibiotic levofloxacin. Therein, the lumping identifies which processes are informed by the available data, and the respective model reduction improves the robustness of parameter estimation. Furthermore, it is shown how a priori known factors influencing the variability and a priori known unexplained variability are incorporated to further mechanistically drive the model development. Consequently, correlations between parameters and between covariates are automatically accounted for due to the mechanistic derivation of the lumping and the covariate relationships. A useful feature of PBPK models compared to classical data-driven PK models is the possibility to predict drug concentrations within all organs and tissues of the body. Thus, the resulting PBPK model for levofloxacin is used to predict drug concentrations and their variability within soft tissues, which are the site of action of levofloxacin. These predictions are compared with data from muscle and adipose tissue obtained by microdialysis, an invasive technique that measures a proportion of drug in the tissue and thereby allows approximating the concentrations in the interstitial fluid of tissues. Because comparisons of human in vivo tissue PK with PBPK predictions have not been established so far, a new conceptual framework is derived. The comparison of PBPK model predictions and microdialysis measurements shows an adequate agreement and reveals further strengths of the presented new approach. We demonstrated how mechanistic PBPK models, which are usually developed in the early stage of drug development, can be used as a basis for model building in the analysis of later stages, i.e. in clinical studies. As a consequence, the extensively collected and accumulated knowledge about the species and the drug is utilized and updated with specific volunteer or patient data. The NLME approach combined with mechanistic modeling reveals new insights for the mechanistic model, for example the identification and quantification of variability in mechanistic processes. This represents a further contribution to the learn \& confirm paradigm across different stages of drug development. Finally, the applicability of mechanism-driven model development is demonstrated on an example from the field of Quantitative Psycholinguistics to analyse repeated eye movement data.
Our approach gives new insight into the interpretation of these experiments and the processes behind them.}, language = {en} } @phdthesis{Gopalakrishnan2016, author = {Gopalakrishnan, Sathej}, title = {Mathematical modelling of host-disease-drug interactions in HIV disease}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100100}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2016}, abstract = {The human immunodeficiency virus (HIV) has resisted nearly three decades of efforts targeting a cure. Sustained suppression of the virus has remained a challenge, mainly due to the remarkable evolutionary adaptation that the virus exhibits by the accumulation of drug-resistant mutations in its genome. Current therapeutic strategies aim at achieving and maintaining a low viral burden and typically involve multiple drugs. The choice of optimal combinations of these drugs is crucial, particularly when treatment failure has previously occurred with certain other drugs. An understanding of the dynamics of viral mutant genotypes aids in assessing treatment failure with a certain drug combination and in exploring potential salvage treatment regimens. Mathematical models of viral dynamics have proved invaluable in understanding the viral life cycle and the impact of antiretroviral drugs. However, such models typically use simplified and coarse-grained mutation schemes, which curbs the extent of their application to drug-specific clinical mutation data in order to assess potential next-line therapies. Statistical models of mutation accumulation have served well in dissecting mechanisms of resistance evolution by reconstructing mutation pathways under different drug environments. While these models perform well in predicting treatment outcomes by statistical learning, they do not incorporate drug effect mechanistically. Additionally, due to an inherent lack of temporal features in such models, they are less informative on aspects such as predicting mutational abundance at treatment failure. This limits their application in analyzing the pharmacology of antiretroviral drugs, in particular time-dependent characteristics of HIV therapy such as pharmacokinetics and pharmacodynamics, and also in understanding the impact of drug efficacy on mutation dynamics. In this thesis, we develop an integrated model of in vivo viral dynamics incorporating drug-specific mutation schemes learned from clinical data. Our combined modelling approach enables us to study the dynamics of different mutant genotypes and assess mutational abundance at virological failure. As an application of our model, we estimate in vivo fitness characteristics of viral mutants under different drug environments. Our approach also extends naturally to multiple-drug therapies. Further, we demonstrate the versatility of our model by showing how it can be modified to incorporate recently elucidated mechanisms of drug action, including molecules that target host factors. Additionally, we address another important aspect in the clinical management of HIV disease, namely drug pharmacokinetics. It is clear that time-dependent changes in in vivo drug concentration could have an impact on the antiviral effect and also influence decisions on dosing intervals. We present a framework that provides an integrated understanding of key characteristics of multiple-dosing regimens, including drug accumulation ratios and half-lives, and then explore the impact of drug pharmacokinetics on viral suppression.
Finally, parameter identifiability in such nonlinear models of viral dynamics is always a concern, and we investigate techniques that alleviate this issue in our setting.}, language = {en} } @phdthesis{Schindler2023, author = {Schindler, Daniel}, title = {Mathematical modeling and simulation of protrusion-driven cell dynamics}, doi = {10.25932/publishup-61327}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613275}, school = {Universit{\"a}t Potsdam}, pages = {VI, 161}, year = {2023}, abstract = {Amoeboid cell motility takes place in a variety of biomedical processes such as cancer metastasis, embryonic morphogenesis, and wound healing. In contrast to other forms of cell motility, it is mainly driven by substantial cell shape changes. Based on the interplay of explorative membrane protrusions at the front and a slower-acting membrane retraction at the rear, the cell moves in a crawling manner. Underlying these protrusions and retractions are multiple physiological processes resulting in changes of the cytoskeleton, a meshwork of different multi-functional proteins. The complexity and versatility of amoeboid cell motility raise the need for novel computational models based on a profound theoretical framework to analyze and simulate the dynamics of the cell shape. The objective of this thesis is the development of (i) a mathematical framework to describe contour dynamics in time and space, (ii) a computational model to infer expansion and retraction characteristics of individual cell tracks and to produce realistic contour dynamics, and (iii) a complementing Open Science approach to make the above methods fully accessible and easy to use. In this work, we mainly used single-cell recordings of the model organism Dictyostelium discoideum. Based on stacks of segmented microscopy images, we apply a Bayesian approach to obtain smooth representations of the cell membrane, so-called cell contours. We introduce a one-parameter family of regularized contour flows to track reference points on the contour (virtual markers) in time and space. This way, we define a coordinate system to visualize local geometric and dynamic quantities of individual contour dynamics in so-called kymograph plots. In particular, we introduce the local marker dispersion as a measure to identify membrane protrusions and retractions in a fully automated way. This mathematical framework is the basis of a novel contour dynamics model, which consists of three biophysiologically motivated components: one stochastic term, accounting for membrane protrusions, and two deterministic terms to control the shape and area of the contour, which account for membrane retractions. Our model provides a fully automated approach to infer protrusion and retraction characteristics from experimental cell tracks while also being capable of simulating realistic and qualitatively different contour dynamics. Furthermore, the model is used to classify two different locomotion types: the amoeboid and a so-called fan-shaped type. With the complementing Open Science approach, we ensure a high standard regarding the usability of our methods and the reproducibility of our research. In this context, we introduce our software publication named AmoePy, an open-source Python package to segment, analyze, and simulate amoeboid cell motility.
Furthermore, we describe measures to improve its usability and extensibility, e.g., by detailed run instructions and an automatically generated source code documentation, and to ensure its functionality and stability, e.g., by automatic software tests, data validation, and a hierarchical package structure. The mathematical approaches of this work provide substantial improvements regarding the modeling and analysis of amoeboid cell motility. We deem the above methods, due to their generalized nature, to be of greater value for other scientific applications, e.g., varying organisms and experimental setups or the transition from unicellular to multicellular movement. Furthermore, we enable other researchers from different fields, i.e., mathematics, biophysics, and medicine, to apply our mathematical methods. By following Open Science standards, this work is of greater value for the cell migration community and a potential role model for other Open Science contributions.}, language = {en} } @misc{Reimann2024, type = {Master Thesis}, author = {Reimann, Hans}, title = {Towards robust inference for Bayesian filtering of linear Gaussian dynamical systems subject to additive change}, doi = {10.25932/publishup-64946}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-649469}, school = {Universit{\"a}t Potsdam}, pages = {ix, 156}, year = {2024}, abstract = {State space models enjoy wide popularity in mathematical and statistical modelling across disciplines and research fields. Common solutions to problems of estimating and forecasting a latent signal, such as the celebrated Kalman filter, rely on a set of strong assumptions such as linearity of the system dynamics and Gaussianity of the noise terms. We investigate the fallacies of mis-specifying the noise terms, that is, the signal noise and the observation noise, with regard to heavy-tailedness: the true dynamics frequently produce observation outliers or abrupt jumps of the signal state due to realizations of heavy tails that are not considered by the model. We propose a formalisation of observation noise mis-specification in terms of Huber's ε-contamination as well as a computationally cheap solution via generalised Bayesian posteriors with a diffusion Stein divergence loss, resulting in the diffusion score matching Kalman filter, a modified algorithm akin in complexity to the regular Kalman filter. For this new filter, interpretations of the novel terms, stability, and an ensemble variant are discussed. Regarding signal noise mis-specification, we propose a formalisation in the framework of change point detection and join ideas from the popular CUSUM algorithm with ideas from Bayesian online change point detection to combine frequent reliability constraints and online inference, resulting in a Gaussian mixture model variant of multiple Kalman filters. We hereby exploit open-end sequential probability ratio tests on the evidence of Kalman filters on observation sub-sequences for aggregated inference under notions of plausibility. Both proposed methods are combined to investigate the double mis-specification problem and are discussed regarding their capabilities in reliable and well-tuned uncertainty quantification.
Each section provides an introduction to the required terminology and tools as well as simulation experiments on the popular target tracking task and the non-linear, chaotic Lorenz-63 system to showcase the practical performance of the theoretical considerations.}, language = {en} } @phdthesis{Fischer2024, author = {Fischer, Florian}, title = {Hardy inequalities on graphs}, doi = {10.25932/publishup-64773}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-647730}, school = {Universit{\"a}t Potsdam}, pages = {vi, 160}, year = {2024}, abstract = {The dissertation is concerned with a central inequality of nonlinear potential theory, the Hardy inequality. It states that the nonlinear energy functional can be bounded from below by the p-th power of a weighted p-norm, p>1. The energy functional consists of a divergence part and an arbitrary potential part. As underlying spaces, locally summable infinite graphs are chosen here. Previous publications on Hardy inequalities on graphs have mainly considered the special case p=2, or locally finite graphs without a potential part. Two fundamental questions now arise quite naturally: For which graphs does a Hardy inequality exist at all? And, if it exists, is there a way to obtain an optimal weight? Answers to these questions are given in Theorem 10.1 and Theorem 12.1. Theorem 10.1 provides a series of characterisations; among other things, a Hardy inequality holds on a graph if and only if a Green's function exists. Theorem 12.1 gives an explicit formula for computing optimal Hardy weights for locally finite graphs under some additional technical assumptions. Examples show that Green's functions are good candidates to be inserted into the formula. In order to prove these two theorems, a variety of techniques has to be developed, which are treated in the first chapters. Particularly noteworthy among them are a generalisation of the ground state transformation (Theorem 4.1), an Agmon-Allegretto-Piepenbrink-type result (Theorem 6.1), and the comparison principle (Proposition 7.3), since these results are applied very frequently and thus form the foundation of the dissertation. Emphasis is also placed on illustrating the theory by examples, with a focus on the natural numbers, Euclidean lattices, trees, and star graphs. Finally, a nonlinear version of the Heisenberg uncertainty principle and a Rellich inequality are deduced from the Hardy inequality.}, language = {en} }