@phdthesis{Meyerhoefer2003, author = {Meyerh{\"o}fer, Wolfram}, title = {Was testen Tests? Objektiv-hermeneutische Analysen am Beispiel von TIMSS und PISA}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12848}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {Als ich anfing, ein Thema f{\"u}r meine Promotion zu erarbeiten, fand ich Massentests ziemlich beeindruckend. TIMSS: {\"u}ber 500000 Sch{\"u}ler getestet. PISA: 180000 Sch{\"u}ler getestet. Ich wollte diese Datenbasis nutzen, um Erkenntnisse f{\"u}r die Gestaltung von Unterricht zu gewinnen. Leider kam ich damit nicht weit. Je tiefer ich mich mit den Tests und den dahinterstehenden Theorien befasste, desto deutlicher sch{\"a}lte sich heraus, dass mit diesen Tests keine neue Erkenntnis generiert werden kann. Fast alle Schlussfolgerungen, die aus den Tests gezogen werden, konnten gar nicht aus den Tests selbst gewonnen werden. Ich konzentrierte mich zunehmend auf die Testaufgaben, weil die Geltung der Aussage eines Tests an der Aufgabe erzeugt wird: In der Aufgabe gerinnt das, was die Tester als „mathematische Leistungsf{\"a}higkeit" konstruieren. Der Sch{\"u}ler wiederum hat nur die Aufgabe vor sich. Es gibt nur „gel{\"o}st" (ein Punkt) und „ungel{\"o}st" (kein Punkt). Damit der Sch{\"u}ler den Punkt bekommt, muss er an der richtigen Stelle ankreuzen, oder er muss etwas hinschreiben, wof{\"u}r der Auswerter einen Punkt gibt. In der Dissertation wird untersucht, was die Aufgaben testen, was also alles in das Konstrukt von „mathematischer Leistungsf{\"a}higkeit" einfließt, und ob es das ist, was der Test testen soll. Es stellte sich durchaus Erstaunliches heraus: - Oftmals gibt es so viele M{\"o}glichkeiten, zur gew{\"u}nschten L{\"o}sung (die nicht in jedem Fall die richtige L{\"o}sung ist) zu gelangen, dass man nicht benennen kann, welche F{\"a}higkeit die Aufgabe eigentlich misst. Das Konstrukt „mathematische Leistungsf{\"a}higkeit" wird damit zu einem zuf{\"a}lligen. - Es werden Komponenten von Testf{\"a}higkeit mitgemessen: Viele Aufgaben enthalten Irritationen, welche von testerfahrenen Sch{\"u}lern leichter {\"u}berwunden werden k{\"o}nnen als von testunerfahrenen. Es gibt Aufgaben, die gel{\"o}st werden k{\"o}nnen, ohne dass man {\"u}ber die F{\"a}higkeit verf{\"u}gt, die getestet werden soll. Umgekehrt gibt es Aufgaben, die man eventuell nicht l{\"o}sen kann, obwohl man {\"u}ber diese F{\"a}higkeit verf{\"u}gt. Als Kernkompetenz von Testf{\"a}higkeit stellt sich heraus, weder das gestellte mathematische Problem noch die angeblichen realen Probleme ernst zu nehmen, sondern sich statt dessen auf das zu konzentrieren, was die Tester angekreuzt oder hingeschrieben sehen wollen. Prinzipiell erweist es sich als g{\"u}nstig, mittelm{\"a}ßig zu arbeiten, auf intellektuelle Tiefe in der Auseinandersetzung mit den Aufgaben also zu verzichten. - Man kann bei Multiple-Choice-Tests raten. Die PISA-Gruppe behauptet zwar, dieses Problem technisch {\"u}berwinden zu k{\"o}nnen, dies erweist sich aber als Fehleinsch{\"a}tzung. - Sowohl bei TIMSS als auch bei PISA stellt sich heraus, dass die vorgeblich verwendeten didaktischen und psychologischen Theorien lediglich theoretische M{\"a}ntel f{\"u}r eine theoriearme Testerstellung sind. Am Beispiel der Theorie der mentalen Situationsmodelle (zur Bearbeitung von realit{\"a}tsnahen Aufgaben) wird dies ausf{\"u}hrlich exemplarisch ausgearbeitet. Das Problem reproduziert sich in anderen Theoriefeldern. 
Die Tests werden nicht durch Operationalisierungen von Messkonstrukten erstellt, sondern durch systematisches Zusammenst{\"u}ckeln von Aufgaben. - Bei PISA sollte „Mathematical Literacy" getestet werden. Verk{\"u}rzt sollte das die F{\"a}higkeit sein, „die Rolle, die Mathematik in der Welt spielt, zu erkennen und zu verstehen, begr{\"u}ndete mathematische Urteile abzugeben und sich auf eine Weise mit der Mathematik zu befassen, die den Anforderungen des gegenw{\"a}rtigen und k{\"u}nftigen Lebens einer Person als eines konstruktiven, engagierten und reflektierten B{\"u}rgers entspricht" (PISA-Eigendarstellung). Von all dem kann angesichts der Aufgaben keine Rede sein. - Bei der Untersuchung des PISA-Tests dr{\"a}ngte sich ein mathematikdidaktischer Habitus auf, der eine separate Untersuchung erzwang. Ich habe ihn unter dem Stichwort der „Abkehr von der Sache" zusammengefasst. Er ist gepr{\"a}gt von Zerst{\"o}rungen des Mathematischen bei gleichzeitiger {\"U}berbetonung des Fachsprachlichen und durch Verwerfungen des Mathematischen und des Realen bei realit{\"a}tsnahen Aufgaben. Letzteres gr{\"u}ndet in der Nichtbeachtung der Authentizit{\"a}t sowohl des Realen als auch des Mathematischen. Die Arbeit versammelt neben den Untersuchungen zu TIMSS und PISA ein ausf{\"u}hrliches Kapitel {\"u}ber das Problem des Testens und eine Darstellung der Methodologie und Praxis der Objektiven Hermeneutik.}, language = {de} } @phdthesis{Hanisch2011, author = {Hanisch, Florian}, title = {Variational problems on supermanifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59757}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this thesis, we discuss the formulation of variational problems on supermanifolds. Supermanifolds incorporate bosonic as well as fermionic degrees of freedom. Fermionic fields take values in the odd part of an appropriate Grassmann algebra and thus show anticommutative behaviour. However, a systematic treatment of these Grassmann parameters requires a description of spaces as functors, e.g. from the category of Grassmann algebras into the category of sets (or topological spaces, manifolds). After an introduction to the general ideas of this approach, we use it to give a description of the resulting supermanifolds of fields/maps. We show that each map is uniquely characterized by a family of differential operators of appropriate order. Moreover, we demonstrate that each of these maps is uniquely characterized by its component fields, i.e. by the coefficients in a Taylor expansion w.r.t. the odd coordinates. In general, the component fields are only locally defined. We present a way to circumvent this limitation. In fact, by enlarging the supermanifold in question, we show that it is possible to work with globally defined components. We eventually use this formalism to study variational problems. More precisely, we study a super version of the geodesic and a generalization of harmonic maps to supermanifolds. Equations of motion are derived from an energy functional and we show how to decompose them into components. Finally, in special cases, we can prove the existence of critical points by reducing the problem to equations from ordinary geometric analysis. 
After solving these component equations, it is possible to show that their solutions give rise to critical points in the functor spaces of fields.}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Project for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. 
In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies Hopefully, this dissertation will be useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching career.}, language = {en} } @phdthesis{Angwenyi2019, author = {Angwenyi, David}, title = {Time-continuous state and parameter estimation with application to hyperbolic SPDEs}, doi = {10.25932/publishup-43654}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436542}, school = {Universit{\"a}t Potsdam}, pages = {xi, 101}, year = {2019}, abstract = {Data assimilation has been an active area of research in recent years, owing to its wide utility. At the core of data assimilation are filtering, prediction, and smoothing procedures. Filtering entails incorporation of measurements' information into the model to gain more insight into a given state governed by a noisy state space model. Most natural laws are governed by time-continuous nonlinear models. For the most part, the knowledge available about a model is incomplete, and hence uncertainties are approximated by means of probabilities. Time-continuous filtering, therefore, holds promise for wider usefulness, for it offers a means of combining noisy measurements with an imperfect model to provide more insight into a given state. The solution to the time-continuous nonlinear Gaussian filtering problem is provided by the Kushner-Stratonovich equation. Unfortunately, the Kushner-Stratonovich equation lacks a closed-form solution. Moreover, the numerical approximations based on Taylor expansion above third order are fraught with computational complications. For this reason, numerical methods based on Monte Carlo methods have been resorted to. Chief among these methods are sequential Monte-Carlo methods (or particle filters), for they allow for online assimilation of data. Particle filters are not without challenges: they suffer from particle degeneracy, sample impoverishment, and computational costs arising from resampling. The goal of this thesis is to: i) review the derivation of the Kushner-Stratonovich equation from first principles and its extant numerical approximation methods, ii) study the feedback particle filters as a way of avoiding resampling in particle filters, iii) study joint state and parameter estimation in time-continuous settings, iv) apply the notions studied to linear hyperbolic stochastic differential equations. The interconnection between It{\^o} integrals and stochastic partial differential equations and those of Stratonovich is introduced in anticipation of feedback particle filters. 
With these ideas and motivated by the variants of ensemble Kalman-Bucy filters founded on the structure of the innovation process, a feedback particle filter with randomly perturbed innovation is proposed. Moreover, feedback particle filters based on coupling of prediction and analysis measures are proposed. They register a better performance than the bootstrap particle filter at lower ensemble sizes. We study joint state and parameter estimation, both by means of extended state spaces and by use of dual filters. Feedback particle filters seem to perform well in both cases. Finally, we apply joint state and parameter estimation to the advection and wave equations, whose velocity is spatially varying. Two methods are employed: Metropolis-Hastings with filter likelihood and a dual filter comprising a Kalman-Bucy filter and an ensemble Kalman-Bucy filter. The former performs better than the latter.}, language = {en} } @phdthesis{Mera2017, author = {Mera, Azal Jaafar Musa}, title = {The Navier-Stokes equations for elliptic quasicomplexes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398495}, school = {Universit{\"a}t Potsdam}, pages = {101}, year = {2017}, abstract = {The classical Navier-Stokes equations of hydrodynamics are usually written in terms of vector analysis. More promising is the formulation of these equations in the language of differential forms of degree one. In this way the study of Navier-Stokes equations includes the analysis of the de Rham complex. In particular, the Hodge theory for the de Rham complex enables one to eliminate the pressure from the equations. The Navier-Stokes equations constitute a parabolic system with a nonlinear term which makes sense only for one-forms. A simpler model of the dynamics of an incompressible viscous fluid is given by Burgers' equation. This work is aimed at the study of the invariant structure of the Navier-Stokes equations, which is closely related to the algebraic structure of the de Rham complex at step 1. To this end we introduce Navier-Stokes equations related to any elliptic quasicomplex of first order differential operators. These equations are quite similar to the classical Navier-Stokes equations, including generalised velocity and pressure vectors. Elimination of the pressure from the generalised Navier-Stokes equations gives a good motivation for the study of the Neumann problem after Spencer for elliptic quasicomplexes. Such a study is also included in the work. We start this work with a discussion of the Lam{\´e} equations within the context of elliptic quasicomplexes on compact manifolds with boundary. The non-stationary Lam{\´e} equations form a hyperbolic system. However, the study of the first mixed problem for them gives good experience for attacking the linearised Navier-Stokes equations. 
On this basis we describe a class of non-linear perturbations of the Navier-Stokes equations, for which the solvability results still hold.}, language = {en} } @phdthesis{LopezValencia2023, author = {Lopez Valencia, Diego Andres}, title = {The Milnor-Moore and Poincar{\´e}-Birkhoff-Witt theorems in the locality set up and the polar structure of Shintani zeta functions}, doi = {10.25932/publishup-59421}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-594213}, school = {Universit{\"a}t Potsdam}, pages = {147}, year = {2023}, abstract = {This thesis bridges two areas of mathematics, algebra on the one hand with the Milnor-Moore theorem (also called Cartier-Quillen-Milnor-Moore theorem) as well as the Poincar{\´e}-Birkhoff-Witt theorem, and analysis on the other hand with Shintani zeta functions which generalise multiple zeta functions. The first part is devoted to an algebraic formulation of the locality principle in physics and generalisations of classification theorems such as the Milnor-Moore and Poincar{\´e}-Birkhoff-Witt theorems to the locality framework. The locality principle roughly says that events that take place far apart in spacetime do not influence each other. The algebraic formulation of this principle discussed here is useful when analysing singularities which arise from events located far apart in space, in order to renormalise them while keeping a memory of the fact that they do not influence each other. We start by endowing a vector space with a symmetric relation, named the locality relation, which keeps track of elements that are "locally independent". The pair of a vector space together with such a relation is called a pre-locality vector space. This concept is extended to tensor products allowing only tensors made of locally independent elements. We extend this concept to the locality tensor algebra and locality symmetric algebra of a pre-locality vector space and prove the universal properties of each of these structures. We also introduce the pre-locality Lie algebras, together with their associated locality universal enveloping algebras, and prove their universal property. We later upgrade all such structures and results from the pre-locality to the locality context, requiring the locality relation to be compatible with the linear structure of the vector space. This allows us to define locality coalgebras, locality bialgebras, and locality Hopf algebras. Finally, all the previous results are used to prove the locality version of the Milnor-Moore and the Poincar{\´e}-Birkhoff-Witt theorems. It is worth noticing that the proofs presented not only generalise the results in the usual (non-locality) setup, but also often use fewer tools than their non-locality counterparts. The second part is devoted to the study of the polar structure of the Shintani zeta functions. Such functions, which generalise the Riemann zeta function, multiple zeta functions, Mordell-Tornheim zeta functions, among others, are parametrised by matrices with real non-negative arguments. It is known that Shintani zeta functions extend to meromorphic functions with poles on affine hyperplanes. We refine this result by showing that the poles lie on hyperplanes parallel to the facets of certain convex polyhedra associated to the defining matrix for the Shintani zeta function. Explicitly, the latter are the Newton polytopes of the polynomials induced by the columns of the underlying matrix. 
We then prove that the coefficients of the equation which describes the hyperplanes in the canonical basis are either zero or one, similar to the poles arising when renormalising generic Feynman amplitudes. For that purpose, we introduce an algorithm to distribute weight over a graph such that the weight at each vertex satisfies a given lower bound.}, language = {en} } @phdthesis{Branding2012, author = {Branding, Volker}, title = {The evolution equations for Dirac-harmonic Maps}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64204}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {This thesis investigates the gradient flow of Dirac-harmonic maps. Dirac-harmonic maps are critical points of an energy functional that is motivated by supersymmetric field theories. The critical points of this energy functional couple the equation for harmonic maps with spinor fields. At present, many analytical properties of Dirac-harmonic maps are known, but a general existence result is still missing. In this thesis the existence question is studied using the evolution equations for a regularized version of Dirac-harmonic maps. Since the energy functional for Dirac-harmonic maps is unbounded from below, the method of the gradient flow cannot be applied directly. Thus, we first of all consider a regularization prescription for Dirac-harmonic maps and then study the gradient flow. Chapter 1 gives some background material on harmonic maps/harmonic spinors and summarizes the currently known results about Dirac-harmonic maps. Chapter 2 introduces the notion of Dirac-harmonic maps in detail and presents a regularization prescription for Dirac-harmonic maps. In Chapter 3 the evolution equations for regularized Dirac-harmonic maps are introduced. In addition, the evolution of certain energies is discussed. Moreover, the existence of a short-time solution to the evolution equations is established. Chapter 4 analyzes the evolution equations in the case that the domain manifold is a closed curve. Here, the existence of a smooth long-time solution is proven. Moreover, if the regularization is large enough, it is shown that the evolution equations converge to a regularized Dirac-harmonic map. Finally, it is discussed in which sense the regularization can be removed. In Chapter 5 the evolution equations are studied when the domain manifold is a closed Riemannian spin surface. If the regularization is large enough, the existence of a global weak solution, which is smooth away from finitely many singularities, is proven. It is shown that the evolution equations converge weakly to a regularized Dirac-harmonic map. In addition, it is discussed whether the regularization can be removed in this case.}, language = {en} } @phdthesis{Koh2008, author = {Koh, Dennis}, title = {The evolution equation for closed magnetic geodesics}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-940793-24-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-16647}, school = {Universit{\"a}t Potsdam}, pages = {60}, year = {2008}, abstract = {Orbits of charged particles under the effect of a magnetic field are mathematically described by magnetic geodesics. They appear as solutions to a system of (nonlinear) ordinary differential equations of second order. But we are only interested in periodic solutions. 
To this end, we study the corresponding system of (nonlinear) parabolic equations for closed magnetic geodesics and, as a main result, eventually prove the existence of long time solutions. As a generalization, one can consider a system of elliptic nonlinear partial differential equations whose solutions describe the orbits of closed p-branes under the effect of a "generalized physical force". For the corresponding evolution equation, which is a system of parabolic nonlinear partial differential equations associated to the elliptic PDE, we can establish the existence of short time solutions.}, language = {en} } @phdthesis{LindbladPetersen2017, author = {Lindblad Petersen, Oliver}, title = {The Cauchy problem for the linearised Einstein equation and the Goursat problem for wave equations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410216}, school = {Universit{\"a}t Potsdam}, pages = {108}, year = {2017}, abstract = {In this thesis, we study two initial value problems arising in general relativity. The first is the Cauchy problem for the linearised Einstein equation on general globally hyperbolic spacetimes, with smooth and distributional initial data. We extend well-known results by showing that given a solution to the linearised constraint equations of arbitrary real Sobolev regularity, there is a globally defined solution, which is unique up to addition of gauge solutions. Two solutions are considered equivalent if they differ by a gauge solution. Our main result is that the equivalence class of solutions depends continuously on the corresponding equivalence class of initial data. We also solve the linearised constraint equations in certain cases and show that there exist arbitrarily irregular (non-gauge) solutions to the linearised Einstein equation on Minkowski spacetime and Kasner spacetime. In the second part, we study the Goursat problem (the characteristic Cauchy problem) for wave equations. We specify initial data on a smooth compact Cauchy horizon, which is a lightlike hypersurface. This problem has not been studied much, since it is an initial value problem on a non-globally hyperbolic spacetime. Our main result is that given a smooth function on a non-empty, smooth, compact, totally geodesic and non-degenerate Cauchy horizon and a so-called admissible linear wave equation, there exists a unique solution that is defined on the globally hyperbolic region and restricts to the given function on the Cauchy horizon. Moreover, the solution depends continuously on the initial data. A linear wave equation is called admissible if the first order part satisfies a certain condition on the Cauchy horizon, for example if it vanishes. Interestingly, both existence and uniqueness of solutions fail for general wave equations, as examples show. If we drop the non-degeneracy assumption, examples show that existence of solutions fails even for the simplest wave equation. The proof requires precise energy estimates for the wave equation close to the Cauchy horizon. In case the Ricci curvature vanishes on the Cauchy horizon, we show that the energy estimates are strong enough to prove local existence and uniqueness for a class of non-linear wave equations. Our results apply in particular to the Taub-NUT spacetime and the Misner spacetime. It has recently been shown that compact Cauchy horizons in spacetimes satisfying the null energy condition are necessarily smooth and totally geodesic. 
Our results therefore apply if the spacetime satisfies the null energy condition and the Cauchy horizon is compact and non-degenerate.}, language = {en} } @phdthesis{Bartels1999, author = {Bartels, Knut}, title = {Tests zur Modellspezifikation in der nichtlinearen Regression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000171}, school = {Universit{\"a}t Potsdam}, year = {1999}, abstract = {Als Grundlage vieler statistischer Verfahren wird der Prozess der Entstehung von Daten modelliert, um dann weitere Sch{\"a}tz- und Testverfahren anzuwenden. Diese Arbeit befasst sich mit der Frage, wie diese Spezifikation f{\"u}r parametrische Modelle selbst getestet werden kann. In Erweiterung bestehender Verfahren werden Tests mit festem Kern eingef{\"u}hrt und ihre asymptotischen Eigenschaften werden analysiert. Es wird gezeigt, dass die Bestimmung der kritischen Werte mit mehreren Stichprobenwiederholungsverfahren m{\"o}glich ist. Von diesen ist eine neue Monte-Carlo-Approximation besonders wichtig, da sie die Komplexit{\"a}t der Berechnung deutlich verringern kann. Ein bedingter Kleinste-Quadrate-Sch{\"a}tzer f{\"u}r nichtlineare parametrische Modelle wird definiert und seine wesentlichen asymptotischen Eigenschaften werden hergeleitet. S{\"a}mtliche Versionen der Tests und alle neuen Konzepte wurden in Simulationsstudien untersucht, deren wichtigste Resultate pr{\"a}sentiert werden. Die praktische Anwendbarkeit der Testverfahren wird an einem Datensatz zur Produktwahl dargelegt, der mit multinomialen Logit-Modellen analysiert werden soll.}, language = {de} } @phdthesis{Hain2022, author = {Hain, Tobias Martin}, title = {Structure formation and identification in geometrically driven soft matter systems}, doi = {10.25932/publishup-55880}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-558808}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 171}, year = {2022}, abstract = {Subdividing space through interfaces leads to many space partitions that are relevant to soft matter self-assembly. Prominent examples include cellular media, e.g. soap froths, which are bubbles of air separated by interfaces of soap and water, but also more complex partitions such as bicontinuous minimal surfaces. Using computer simulations, this thesis analyses soft matter systems in terms of the relationship between the physical forces between the system's constituents and the structure of the resulting interfaces or partitions. The focus is on two systems, copolymeric self-assembly and the so-called Quantizer problem, where the driving force of structure formation, the minimisation of the free energy, is an interplay of surface area minimisation and stretching contributions, favouring cells of uniform thickness. In the first part of the thesis we address copolymeric phase formation with sharp interfaces. We analyse a columnar copolymer system "forced" to assemble on a spherical surface, where the perfect solution, the hexagonal tiling, is topologically prohibited. For a system of three-armed copolymers, the resulting structure is described by solutions of the so-called Thomson problem, the search for minimal energy configurations of repelling charges on a sphere. We find three intertwined Thomson problem solutions on a single sphere, occurring with a probability depending on the radius of the substrate. 
We then investigate the formation of amorphous and crystalline structures in the Quantizer system, a particulate model with an energy functional without surface tension that favours spherical cells of equal size. We find that quasi-static equilibrium cooling allows the Quantizer system to crystallise into a BCC ground state, whereas quenching and non-equilibrium cooling, i.e. cooling at slower rates than quenching, leads to an approximately hyperuniform, amorphous state. The assumed universality of the latter, i.e. independence of energy minimisation method or initial configuration, is strengthened by our results. We expand the Quantizer system by introducing interface tension, creating a model that we find to mimic polymeric micelle systems: An order-disorder phase transition is observed with a stable Frank-Kasper phase. The second part considers bicontinuous partitions of space into two network-like domains, and introduces an open-source tool for the identification of structures in electron microscopy images. We expand a method of matching experimentally accessible projections with computed projections of potential structures, introduced by Deng and Mieczkowski (1998). The computed structures are modelled using nodal representations of constant-mean-curvature surfaces. A case study conducted on etioplast cell membranes in chloroplast precursors establishes the double Diamond surface structure to be dominant in these plant cells. We automate the matching process employing deep-learning methods, which manage to identify structures with excellent accuracy.}, language = {en} } @phdthesis{Pedeches2017, author = {P{\´e}d{\`e}ches, Laure}, title = {Stochastic models for collective motions of populations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405491}, school = {Universit{\"a}t Potsdam}, pages = {187}, year = {2017}, abstract = {Stochastisches Modell f{\"u}r kollektive Bewegung von Populationen In dieser Doktorarbeit befassen wir uns mit stochastischen Systemen, die eines der mysteri{\"o}sesten biologischen Ph{\"a}nomene als Modell darstellen: die kollektive Bewegung von Gemeinschaften. Diese wird bei V{\"o}gel- und Fischschw{\"a}rmen, aber auch bei manchen Bakterien, Viehherden oder gar bei Menschen beobachtet. Dieser Verhaltenstyp spielt ebenfalls in anderen Bereichen wie Finanzwesen, Linguistik oder auch Robotik eine Rolle. Wir nehmen uns der Dynamik einer Gruppe von N Individuen, insbesondere zweier asymptotischer Verhaltenstypen an. Einerseits befassen wir uns mit den Eigenschaften der Ergodizit{\"a}t in Langzeit: Existenz einer invarianten Wahrscheinlichkeitsverteilung durch Ljapunow-Funktionen, und Konvergenzrate der {\"U}bergangshalbgruppe gegen diese Wahrscheinlichkeit. Eine ebenfalls zentrale Thematik unserer Forschung ist der Begriff Flocking: es wird damit definiert, dass eine Gruppe von Individuen einen dynamischen Konsens ohne hierarchische Struktur erreichen kann; mathematisch gesehen entspricht dies der Aneinanderreihung der Geschwindigkeiten und dem Zusammenkommen des Schwarmes. Andererseits gehen wir das Ph{\"a}nomen der "Propagation of Chaos" an, wenn die Anzahl N der Teilchen ins Unendliche tendiert: die Bewegungen der jeweiligen Individuen werden asymptotisch unabh{\"a}ngig. Unser Ausgangspunkt ist das Cucker-Smale-Modell, ein deterministisches kinetisches Molekular-Modell f{\"u}r eine Gruppe ohne hierarchische Struktur. 
Die Wechselwirkung zwischen zwei Teilchen variiert gem{\"a}ß deren "Kommunikationsrate", die wiederum von deren relativer Entfernung abh{\"a}ngt und polynomisch abnimmt. Im ersten Kapitel adressieren wir das asymptotische Verhalten eines Cucker-Smale-Modells mit Rauschst{\"o}rung und dessen Varianten. Kapitel 2 stellt mehrere Definitionen des Flockings in einem Zufallsrahmen dar: diverse stochastische Systeme, die verschiedenen Rauschformen entsprechen (die eine gest{\"o}rte Umgebung, den "freien Willen" des jeweiligen Individuums oder eine unterbrochene {\"U}bertragung suggerieren), werden im Zusammenhang mit diesen Begriffen unter die Lupe genommen. Das dritte Kapitel basiert auf der "Cluster Expansion"-Methode aus der statistischen Mechanik. Wir beweisen die exponentielle Ergodizit{\"a}t von gewissen nicht-Markow-Prozessen mit nicht-glattem Drift und wenden diese Ergebnisse auf St{\"o}rungen des Ornstein-Uhlenbeck-Prozesses an. Im letzten Teil nehmen wir uns der zweidimensionalen parabolisch-elliptischen Gleichung von Keller-Segel an. Wir beweisen die Existenz einer L{\"o}sung, welche in gewisser Hinsicht einzig ist, indem wir, mittels Vergleich mit Bessel-Prozessen und der Dirichlet-Formtheorie, m{\"o}gliche Stoßtypen zwischen den Teilchen ermitteln.}, language = {en} } @phdthesis{Kroencke2013, author = {Kr{\"o}ncke, Klaus}, title = {Stability of Einstein Manifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69639}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis deals with Einstein metrics and the Ricci flow on compact manifolds. We study the second variation of the Einstein-Hilbert functional on Einstein metrics. In the first part of the work, we find curvature conditions which ensure the stability of Einstein manifolds with respect to the Einstein-Hilbert functional, i.e. that the second variation of the Einstein-Hilbert functional at the metric is nonpositive in the direction of transverse-traceless tensors. The second part of the work is devoted to the study of the Ricci flow and how its behaviour close to Einstein metrics is influenced by the variational behaviour of the Einstein-Hilbert functional. We find conditions which imply that Einstein metrics are dynamically stable or unstable with respect to the Ricci flow and we express these conditions in terms of stability properties of the metric with respect to the Einstein-Hilbert functional and properties of the Laplacian spectrum.}, language = {en} } @phdthesis{Oancea2021, author = {Oancea, Marius-Adrian}, title = {Spin Hall effects in general relativity}, doi = {10.25932/publishup-50229}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502293}, school = {Universit{\"a}t Potsdam}, pages = {vii, 123}, year = {2021}, abstract = {The propagation of test fields, such as electromagnetic, Dirac or linearized gravity, on a fixed spacetime manifold is often studied by using the geometrical optics approximation. In the limit of infinitely high frequencies, the geometrical optics approximation provides a conceptual transition between the test field and an effective point-particle description. The corresponding point-particles, or wave rays, coincide with the geodesics of the underlying spacetime. For most astrophysical applications of interest, such as the observation of celestial bodies, gravitational lensing, or the observation of cosmic rays, the geometrical optics approximation and the effective point-particle description represent a satisfactory theoretical model. 
However, the geometrical optics approximation gradually breaks down as test fields of finite frequency are considered. In this thesis, we consider the propagation of test fields on spacetime, beyond the leading-order geometrical optics approximation. By performing a covariant Wentzel-Kramers-Brillouin analysis for test fields, we show how higher-order corrections to the geometrical optics approximation can be considered. The higher-order corrections are related to the dynamics of the spin internal degree of freedom of the considered test field. We obtain an effective point-particle description, which contains spin-dependent corrections to the geodesic motion obtained using geometrical optics. This represents a covariant generalization of the well-known spin Hall effect, usually encountered in condensed matter physics and in optics. Our analysis is applied to electromagnetic and massive Dirac test fields, but it can easily be extended to other fields, such as linearized gravity. In the electromagnetic case, we present several examples where the gravitational spin Hall effect of light plays an important role. These include the propagation of polarized light rays on black hole spacetimes and cosmological spacetimes, as well as polarization-dependent effects on the shape of black hole shadows. Furthermore, we show that our effective point-particle equations for polarized light rays reproduce well-known results, such as the spin Hall effect of light in an inhomogeneous medium, and the relativistic Hall effect of polarized electromagnetic wave packets encountered in Minkowski spacetime.}, language = {en} } @phdthesis{Perera2021, author = {Perera, Upeksha}, title = {Solutions of direct and inverse Sturm-Liouville problems}, doi = {10.25932/publishup-53006}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-530064}, school = {Universit{\"a}t Potsdam}, pages = {x, 109}, year = {2021}, abstract = {The Lie group method in combination with the Magnus expansion is utilized to develop a universal method applicable to solving a Sturm-Liouville Problem (SLP) of any order with arbitrary boundary conditions. It is shown that the method has the ability to solve direct regular and some singular SLPs of even orders (tested up to order eight), with a mix of boundary conditions (including non-separable and finite singular endpoints), accurately and efficiently. The present technique is successfully applied to overcome the difficulties in finding suitable sets of eigenvalues so that the inverse SLP can be effectively solved. Next, a concrete implementation of the inverse Sturm-Liouville problem algorithm proposed by Barcilon (1974) is provided. Furthermore, computational feasibility and applicability of this algorithm to solve inverse Sturm-Liouville problems of order n=2,4 are verified successfully. It is observed that the method is successful even in the presence of significant noise, provided that the assumptions of the algorithm are satisfied. 
In conclusion, this work provides methods that can be adapted successfully for solving a direct (regular/singular) or inverse SLP of an arbitrary order with arbitrary boundary conditions.}, language = {en} } @phdthesis{Wallenta2015, author = {Wallenta, Daniel}, title = {Sequences of compact curvature}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87489}, school = {Universit{\"a}t Potsdam}, pages = {viii, 73}, year = {2015}, abstract = {By perturbing the differential of a (cochain-)complex by "small" operators, one obtains what are referred to as quasicomplexes, i.e. sequences whose curvature is not equal to zero in general. In this situation the cohomology is no longer defined. Note that it depends on the structure of the underlying spaces whether or not an operator is "small." This leads to a magical mix of perturbation and regularisation theory. In the general setting of Hilbert spaces compact operators are "small." In order to develop this theory, many elements of diverse mathematical disciplines, such as functional analysis, differential geometry, partial differential equations, homological algebra and topology, have to be combined. All essential basics are summarised in the first chapter of this thesis. This contains classical elements of index theory, such as Fredholm operators, elliptic pseudodifferential operators and characteristic classes. Moreover we study the de Rham complex and introduce Sobolev spaces of arbitrary order as well as the concept of operator ideals. In the second chapter, the abstract theory of (Fredholm) quasicomplexes of Hilbert spaces will be developed. From the very beginning we will consider quasicomplexes with curvature in an ideal class. We introduce the Euler characteristic, the cone of a quasiendomorphism and the Lefschetz number. In particular, we generalise Euler's identity, which will allow us to develop the Lefschetz theory on nonseparable Hilbert spaces. Finally, in the third chapter the abstract theory will be applied to elliptic quasicomplexes with pseudodifferential operators of arbitrary order. We will show that the Atiyah-Singer index formula holds true for those objects and, as an example, we will compute the Euler characteristic of the connection quasicomplex. In addition to this we introduce geometric quasiendomorphisms and prove a generalisation of the Lefschetz fixed point theorem of Atiyah and Bott.}, language = {en} } @phdthesis{DiGesu2012, author = {Di Ges{\`u}, Giacomo}, title = {Semiclassical spectral analysis of discrete Witten Laplacians}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65286}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {A discrete analogue of the Witten Laplacian on the n-dimensional integer lattice is considered. After rescaling of the operator and the lattice size we analyze the tunnel effect between different wells, providing sharp asymptotics of the low-lying spectrum. Our proof, inspired by work of B. Helffer, M. Klein and F. Nier in the continuous setting, is based on the construction of a discrete Witten complex and a semiclassical analysis of the corresponding discrete Witten Laplacian on 1-forms. 
The result can be reformulated in terms of metastable Markov processes on the lattice.}, language = {en} } @phdthesis{Hohberger2006, author = {Hohberger, Horst}, title = {Semiclassical asymptotics for the scattering amplitude in the presence of focal points at infinity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-11574}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {We consider scattering in \$\R^n\$, \$n\ge 2\$, described by the Schr\"odinger operator \$P(h)=-h^2\Delta+V\$, where \$V\$ is a short-range potential. With the aid of Maslov theory, we give a geometrical formula for the semiclassical asymptotics as \$h\to 0\$ of the scattering amplitude \$f(\omega_-,\omega_+;\lambda,h)\$ (\$\omega_+\neq\omega_-\$) which remains valid in the presence of focal points at infinity (caustics). Crucial for this analysis are precise estimates on the asymptotics of the classical phase trajectories and the relationship between caustics in Euclidean phase space and caustics at infinity.}, subject = {Mathematik}, language = {en} } @phdthesis{Demircioglu2007, author = {Demircioglu, Aydin}, title = {Reconstruction of deligne classes and cocycles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13755}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {In der vorliegenden Arbeit verallgemeinern wir im Wesentlichen zwei Theoreme von Mackaay-Picken und Picken (2002, 2004). In ihrem Artikel zeigen Mackaay und Picken, dass es eine bijektive Korrespondenz zwischen Deligne 2-Klassen \$\xi \in \check{H}^2(M, \mathcal{D}^2)\$ und Holonomie Abbildungen von der zweiten d{\"u}nnen Homotopiegruppe \$\pi_2^2(M)\$ in die abelsche Gruppe \$U(1)\$ gibt. Im zweiten Artikel wird eine Verallgemeinerung dieses Theorems bewiesen: Picken zeigt, dass es eine Bijektion gibt zwischen Deligne 2-Kozykeln und gewissen 2-dimensionalen topologischen Quantenfeldtheorien. In dieser Arbeit zeigen wir, dass diese beiden Theoreme in allen Dimensionen gelten. Wir betrachten zun{\"a}chst den Holonomie Fall und k{\"o}nnen mittels simplizialer Methoden nachweisen, dass die Gruppe der glatten Deligne \$d\$-Klassen isomorph ist zu der Gruppe der glatten Holonomie Abbildungen von der \$d\$-ten d{\"u}nnen Homotopiegruppe \$\pi_d^d(M)\$ nach \$U(1)\$, sofern \$M\$ eine \$(d-1)\$-zusammenh{\"a}ngende Mannigfaltigkeit ist. Wir vergleichen dieses Resultat mit einem Satz von Gajer (1999). Gajer zeigte, dass jede Deligne \$d\$-Klasse durch eine andere Klasse von Holonomie-Abbildungen rekonstruiert werden kann, die aber nicht nur Holonomien entlang von Sph{\"a}ren, sondern auch entlang von allgemeinen \$d\$-Mannigfaltigkeiten in \$M\$ enth{\"a}lt. Dieser Zugang ben{\"o}tigt dann aber nicht, dass \$M\$ hoch-zusammenh{\"a}ngend ist. Wir zeigen, dass im Falle von flachen Deligne \$d\$-Klassen unser Rekonstruktionstheorem sich von Gajers unterscheidet, sofern \$M\$ nicht als \$(d-1)\$, sondern nur als \$(d-2)\$-zusammenh{\"a}ngend angenommen wird. Stiefel Mannigfaltigkeiten besitzen genau diese Eigenschaft, und wendet man unser Theorem auf diese an und vergleicht das Resultat mit dem von Gajer, so zeigt sich, dass es zu viele Deligne Klassen rekonstruiert. Dies bedeutet, dass unser Rekonstruktionstheorem ohne die Zusatzbedingungen an die Mannigfaltigkeit M nicht auskommt, d.h. unsere Rekonstruktion ben{\"o}tigt zwar weniger Informationen {\"u}ber die Holonomie entlang von d-dimensionalen Mannigfaltigkeiten, aber daf{\"u}r muss M auch \$(d-1)\$-zusammenh{\"a}ngend angenommen werden. 
Wir zeigen dann, dass auch das zweite Theorem verallgemeinert werden kann: Indem wir das Konzept einer Picken topologischen Quantenfeldtheorie in beliebigen Dimensionen einf{\"u}hren, k{\"o}nnen wir nachweisen, dass jeder Deligne \$d\$-Kozykel eine solche \$d\$-dimensionale Feldtheorie mit zwei besonderen Eigenschaften, der d{\"u}nnen Invarianz und der Glattheit, induziert. Wir beweisen, dass jede \$d\$-dimensionale topologische Quantenfeldtheorie nach Picken mit diesen zwei Eigenschaften auch eine Deligne \$d\$-Klasse definiert und pr{\"u}fen nach, dass diese Konstruktion sowohl surjektiv als auch injektiv ist. Demzufolge sind beide Gruppen isomorph.}, language = {en} } @phdthesis{Murr2012, author = {Murr, R{\"u}diger}, title = {Reciprocal classes of Markov processes : an approach with duality formulae}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62091}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {This work is concerned with the characterization of certain classes of stochastic processes via duality formulae. In particular we consider reciprocal processes with jumps, a subject up to now neglected in the literature. In the first part we introduce a new formulation of a characterization of processes with independent increments. This characterization is based on a duality formula satisfied by processes with infinitely divisible increments, in particular L{\´e}vy processes, which is well known in Malliavin calculus. We obtain two new methods to prove this duality formula, which are not based on the chaos decomposition of the space of square-integrable functionals. One of these methods uses a formula of partial integration that characterizes infinitely divisible random vectors. In this context, our characterization is a generalization of Stein's lemma for Gaussian random variables and Chen's lemma for Poisson random variables. The generality of our approach permits us to derive a characterization of infinitely divisible random measures. The second part of this work focuses on the study of the reciprocal classes of Markov processes with and without jumps and their characterization. We start with a r{\´e}sum{\´e} of already existing results concerning the reciprocal classes of Brownian diffusions as solutions of duality formulae. As a new contribution, we show that the duality formula satisfied by elements of the reciprocal class of a Brownian diffusion has a physical interpretation as a stochastic Newton equation of motion. Thus we are able to connect the results of characterizations via duality formulae with the theory of stochastic mechanics by our interpretation, and to stochastic optimal control theory by the mathematical approach. As an application we are able to prove an invariance property of the reciprocal class of a Brownian diffusion under time reversal. In the context of pure jump processes we derive the following new results. We describe the reciprocal classes of Markov counting processes, also called unit jump processes, and obtain a characterization of the associated reciprocal class via a duality formula. This formula contains as key terms a stochastic derivative, a compensated stochastic integral and an invariant of the reciprocal class. Moreover we present an interpretation of the characterization of a reciprocal class in the context of stochastic optimal control of unit jump processes. As a further application we show that the reciprocal class of a Markov counting process has an invariance property under time reversal. 
Some of these results are extendable to the setting of pure jump processes, that is, we admit different jump-sizes. In particular, we show that the reciprocal classes of Markov jump processes can be compared using reciprocal invariants. A characterization of the reciprocal class of compound Poisson processes via a duality formula is possible under the assumption that the jump-sizes of the process are incommensurable.}, language = {en} } @phdthesis{Conforti2015, author = {Conforti, Giovanni}, title = {Reciprocal classes of continuous time Markov Chains}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82255}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 183}, year = {2015}, abstract = {In this thesis we study reciprocal classes of Markov chains. Given a continuous time Markov chain on a countable state space, acting as reference dynamics, the associated reciprocal class is the set of all probability measures on path space that can be written as a mixture of its bridges. These processes possess a conditional independence property that generalizes the Markov property, and evolved from an idea of Schr{\"o}dinger, who wanted to obtain a probabilistic interpretation of quantum mechanics. Associated to a reciprocal class is a set of reciprocal characteristics, which are space-time functions that determine the reciprocal class. We compute explicitly these characteristics, and divide them into two main families: arc characteristics and cycle characteristics. As a byproduct, we obtain an explicit criterion to check when two different Markov chains share their bridges. Starting from the characteristics we offer two different descriptions of the reciprocal class, including its non-Markov probabilities. The first one is based on a pathwise approach and the second one on short time asymptotics. With the first approach one produces a family of functional equations whose only solutions are precisely the elements of the reciprocal class. These equations are integration by parts formulae on path space, associated with derivative operators which perturb the paths by means of the addition of random loops. Several geometrical tools are employed to construct such formulas. The problem of obtaining sharp characterizations is also considered, showing some interesting connections with discrete geometry. Examples of such formulas are given in the framework of counting processes and random walks on Abelian groups, where the set of loops has a group structure. In addition to this global description, we propose a second approach by looking at the short time behavior of a reciprocal process. In the same way as the Markov property and short time expansions of transition probabilities characterize Markov chains, we show that a reciprocal class is characterized by imposing the reciprocal property and two families of short time expansions for the bridges. Such a local approach is suitable to study reciprocal processes on general countable graphs. As an application of our characterization, we consider several interesting graphs, such as lattices, planar graphs, the complete graph, and the hypercube. 
Finally, we obtain some first results about concentration of measure implied by lower bounds on the reciprocal characteristics.}, language = {en} } @phdthesis{Fischer2022, author = {Fischer, Jens Walter}, title = {Random dynamics in collective behavior - consensus, clustering \& extinction of populations}, doi = {10.25932/publishup-55372}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-553725}, school = {Universit{\"a}t Potsdam}, pages = {242}, year = {2022}, abstract = {The echo chamber model describes the development of groups in heterogeneous social networks. By heterogeneous social network we mean a set of individuals, each of whom represents exactly one opinion. The existing relationships between individuals can then be represented by a graph. The echo chamber model is a time-discrete model which, like a board game, is played in rounds. In each round, an existing relationship is randomly and uniformly selected from the network and the two connected individuals interact. If the opinions of the individuals involved are sufficiently similar, they continue to move closer together in their opinions, whereas in the case of opinions that are too far apart, they break off their relationship and one of the individuals seeks a new relationship. In this paper we examine the building blocks of this model. We start from the observation that changes in the structure of relationships in the network can be described by a system of interacting particles in a more abstract space. These reflections lead to the definition of a new abstract graph that encompasses all possible relational configurations of the social network. This provides us with the geometric understanding necessary to analyse the dynamic components of the echo chamber model in Part III. As a first step, in Part 7, we leave aside the opinions of the inidividuals and assume that the position of the edges changes with each move as described above, in order to obtain a basic understanding of the underlying dynamics. Using Markov chain theory, we find upper bounds on the speed of convergence of an associated Markov chain to its unique stationary distribution and show that there are mutually identifiable networks that are not apparent in the dynamics under analysis, in the sense that the stationary distribution of the associated Markov chain gives equal weight to these networks. In the reversible cases, we focus in particular on the explicit form of the stationary distribution as well as on the lower bounds of the Cheeger constant to describe the convergence speed. The final result of Section 8, based on absorbing Markov chains, shows that in a reduced version of the echo chamber model, a hierarchical structure of the number of conflicting relations can be identified. We can use this structure to determine an upper bound on the expected absorption time, using a quasi-stationary distribution. This hierarchy of structure also provides a bridge to classical theories of pure death processes. We conclude by showing how future research can exploit this link and by discussing the importance of the results as building blocks for a full theoretical understanding of the echo chamber model. Finally, Part IV presents a published paper on the birth-death process with partial catastrophe. The paper is based on the explicit calculation of the first moment of a catastrophe. This first part is entirely based on an analytical approach to second degree recurrences with linear coefficients. 
The convergence to 0 of the resulting sequence as well as the speed of convergence are proved. On the other hand, we determine upper bounds for the expected value of the population size as well as its variance, and for the difference between the determined upper bound and the actual value of the expected value. For these results we use almost exclusively the theory of ordinary nonlinear differential equations.}, language = {en} } @phdthesis{Nehring2012, author = {Nehring, Benjamin}, title = {Point processes in statistical mechanics : a cluster expansion approach}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62682}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {A point process is a mechanism which randomly realizes locally finite point measures. One of the main results of this thesis is an existence theorem for a new class of point processes with a so-called signed Levy pseudo measure L, which is an extension of the class of infinitely divisible point processes. The construction approach is a combination of the classical point process theory, as developed by Kerstan, Matthes and Mecke, with the method of cluster expansions from statistical mechanics. Here the starting point is a family of signed Radon measures, which defines on the one hand the Levy pseudo measure L, and on the other hand locally the point process. The relation between L and the process is the following: this point process solves the integral cluster equation determined by L. We show that the results from the classical theory of infinitely divisible point processes carry over in a natural way to the larger class of point processes with a signed Levy pseudo measure. In this way we obtain e.g. a criterion for simplicity and a characterization through the cluster equation, interpreted as an integration by parts formula, for such point processes. Our main result in chapter 3 is a representation theorem for the factorial moment measures of the above point processes. With its help we will identify the permanental and determinantal point processes, which belong to the classes of Boson and Fermion processes, respectively. As a by-product we obtain a representation of the (reduced) Palm kernels of infinitely divisible point processes. In chapter 4 we see how the existence theorem enables us to construct (infinitely extended) Gibbs, quantum-Bose and polymer processes. The so-called polymer processes seem to be constructed here for the first time. In the last part of this thesis we prove that the family of cluster equations has certain stability properties with respect to the transformation of its solutions. First, this will be used to show how large the class of solutions of such equations is, and second, to establish the cluster theorem of Kerstan, Matthes and Mecke in our setting. With its help we are able to enlarge the class of Polya processes to the so-called branching Polya processes. The last sections of this work are about thinning and splitting of point processes. One main result is that the classes of Boson and Fermion processes remain closed under thinning. We use the results on thinning to identify a subclass of point processes with a signed Levy pseudo measure as doubly stochastic Poisson processes. We also pose the following question: Assume you observe a realization of a thinned point process. What is the distribution of deleted points? 
Surprisingly, the Papangelou kernel of the thinning, besides a constant factor, is given by the intensity measure of this conditional probability, called splitting kernel.}, language = {en} } @phdthesis{Ludewig2016, author = {Ludewig, Matthias}, title = {Path integrals on manifolds with boundary and their asymptotic expansions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94387}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2016}, abstract = {It is "scientific folklore" coming from physical heuristics that solutions to the heat equation on a Riemannian manifold can be represented by a path integral. However, the problem with such path integrals is that they are notoriously ill-defined. One way to make them rigorous (which is often applied in physics) is finite-dimensional approximation, or time-slicing approximation: Given a fine partition of the time interval into small subintervals, one restricts the integration domain to paths that are geodesic on each subinterval of the partition. These finite-dimensional integrals are well-defined, and the (infinite-dimensional) path integral then is defined as the limit of these (suitably normalized) integrals, as the mesh of the partition tends to zero. In this thesis, we show that indeed, solutions to the heat equation on a general compact Riemannian manifold with boundary are given by such time-slicing path integrals. Here we consider the heat equation for general Laplace type operators, acting on sections of a vector bundle. We also obtain similar results for the heat kernel, although in this case, one has to restrict to metrics satisfying a certain smoothness condition at the boundary. One of the most important manipulations one would like to do with path integrals is taking their asymptotic expansions; in the case of the heat kernel, this is the short time asymptotic expansion. In order to use time-slicing approximation here, one needs the approximation to be uniform in the time parameter. We show that this is possible by giving strong error estimates. Finally, we apply these results to obtain short time asymptotic expansions of the heat kernel also in degenerate cases (i.e. at the cut locus). Furthermore, our results allow to relate the asymptotic expansion of the heat kernel to a formal asymptotic expansion of the infinite-dimensional path integral, which gives relations between geometric quantities on the manifold and on the loop space. In particular, we show that the lowest order term in the asymptotic expansion of the heat kernel is essentially given by the Fredholm determinant of the Hessian of the energy functional. We also investigate how this relates to the zeta-regularized determinant of the Jacobi operator along minimizing geodesics.}, language = {en} } @phdthesis{Lyu2016, author = {Lyu, Xiaojing}, title = {Operators on singular manifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103643}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2016}, abstract = {We study the interplay between analysis on manifolds with singularities and complex analysis and develop new structures of operators based on the Mellin transform and tools for iterating the calculus for higher singularities. We refer to the idea of interpreting boundary value problems (BVPs) in terms of pseudo-differential operators with a principal symbolic hierarchy, taking into account that BVPs are a source of cone and edge operator algebras. 
The respective cone and edge pseudo-differential algebras in turn are the starting point of higher corner theories. In addition there are deep relationships between corner operators and complex analysis. This will be illustrated by the Mellin symbolic calculus.}, language = {en} } @phdthesis{Becker2005, author = {Becker, Christian}, title = {On the Riemannian geometry of Seiberg-Witten moduli spaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5425}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {In this thesis, we give two constructions for Riemannian metrics on Seiberg-Witten moduli spaces. Both these constructions are naturally induced from the L2-metric on the configuration space. The construction of the so called quotient L2-metric is very similar to the construction of an L2-metric on Yang-Mills moduli spaces as given by Groisser and Parker. To construct a Riemannian metric on the total space of the Seiberg-Witten bundle in a similar way, we define the reduced gauge group as a subgroup of the gauge group. We show that the quotient of the premoduli space by the reduced gauge group is isomorphic as a U(1)-bundle to the quotient of the premoduli space by the based gauge group. The total space of this new representation of the Seiberg-Witten bundle carries a natural quotient L2-metric, and the bundle projection is a Riemannian submersion with respect to these metrics. We compute explicit formulae for the sectional curvature of the moduli space in terms of Green operators of the elliptic complex associated with a monopole. Further, we construct a Riemannian metric on the cobordism between moduli spaces for different perturbations. The second construction of a Riemannian metric on the moduli space uses a canonical global gauge fixing, which represents the total space of the Seiberg-Witten bundle as a finite dimensional submanifold of the configuration space. We consider the Seiberg-Witten moduli space on a simply connected K{\"a}hler surface. We show that the moduli space (when nonempty) is a complex projective space, if the perturbation does not admit reducible monopoles, and that the moduli space consists of a single point otherwise. The Seiberg-Witten bundle can then be identified with the Hopf fibration. On the complex projective plane with a special Spin-C structure, our Riemannian metrics on the moduli space are Fubini-Study metrics. Correspondingly, the metrics on the total space of the Seiberg-Witten bundle are Berger metrics. We show that the diameter of the moduli space shrinks to 0 when the perturbation approaches the wall of reducible perturbations. Finally we show that the quotient L2-metric on the Seiberg-Witten moduli space on a K{\"a}hler surface is a K{\"a}hler metric.}, subject = {Eichtheorie}, language = {en} } @phdthesis{Mazzonetto2016, author = {Mazzonetto, Sara}, title = {On the exact simulation of (skew) Brownian diffusions with discontinuous drift}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102399}, school = {Universit{\"a}t Potsdam}, pages = {ii, 100}, year = {2016}, abstract = {This thesis is focused on the study and the exact simulation of two classes of real-valued Brownian diffusions: multi-skew Brownian motions with constant drift and Brownian diffusions whose drift admits a finite number of jumps. The skew Brownian motion was introduced in the sixties by It{\^o} and McKean, who constructed it from the reflected Brownian motion, flipping its excursions from the origin with a given probability. 
Such a process behaves like the original one except at the point 0, which plays the role of a semipermeable barrier. More generally, a skew diffusion with several semipermeable barriers, called multi-skew diffusion, is a diffusion everywhere except when it reaches one of the barriers, where it is partially reflected with a probability depending on that particular barrier. Clearly, a multi-skew diffusion can be characterized either as the solution of a stochastic differential equation involving weighted local times (these terms providing the semi-permeability) or by its infinitesimal generator as a Markov process. In this thesis we first obtain a contour integral representation for the transition semigroup of the multi-skew Brownian motion with constant drift, based on a fine analysis of its complex properties. Thanks to this representation we write explicitly the transition densities of the two-skew Brownian motion with constant drift as an infinite series involving, in particular, Gaussian functions and their tails. Then we propose a new useful application of a generalization of the known rejection sampling method. Recall that this basic algorithm allows one to sample from a density as soon as one finds an easy-to-sample instrumental density verifying that the ratio between the goal and the instrumental densities is a bounded function. The generalized rejection sampling method allows one to sample exactly from densities for which indeed only an approximation is known. The originality of the algorithm lies in the fact that one finally samples directly from the law without any approximation, except the machine's. As an application, we sample from the transition density of the two-skew Brownian motion with or without constant drift. The instrumental density is the transition density of the Brownian motion with constant drift, and we provide a useful uniform bound for the ratio of the densities. We also present numerical simulations to study the efficiency of the algorithm. The second aim of this thesis is to develop an exact simulation algorithm for a Brownian diffusion whose drift admits several jumps. In the literature, so far only the case of a continuous drift (resp. of a drift with one finite jump) was treated. The theoretical method we give allows us to deal with any finite number of discontinuities. Then we focus on the case of two jumps, using the transition densities of the two-skew Brownian motion obtained before. Various examples are presented and the efficiency of our approach is discussed.}, language = {en} } @phdthesis{Gehring2023, author = {Gehring, Penelope}, title = {Non-local boundary conditions for the spin Dirac operator on spacetimes with timelike boundary}, doi = {10.25932/publishup-57775}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577755}, school = {Universit{\"a}t Potsdam}, pages = {100}, year = {2023}, abstract = {Non-local boundary conditions - for example the Atiyah-Patodi-Singer (APS) conditions - for Dirac operators on Riemannian manifolds are rather well-understood, while not much is known for such operators on Lorentzian manifolds. Recently, B{\"a}r and Strohmaier [15] and Drago, Große, and Murro [27] introduced APS-like conditions for the spin Dirac operator on Lorentzian manifolds with spacelike and timelike boundary, respectively. 
While B{\"a}r and Strohmaier [15] showed the Fredholmness of the Dirac operator with these boundary conditions, Drago, Große, and Murro [27] proved the well-posedness of the corresponding initial boundary value problem under certain geometric assumptions. In this thesis, we will follow the footsteps of the latter authors and discuss whether the APS-like conditions for Dirac operators on Lorentzian manifolds with timelike boundary can be replaced by more general conditions such that the associated initial boundary value problems are still wellposed. We consider boundary conditions that are local in time and non-local in the spatial directions. More precisely, we use the spacetime foliation arising from the Cauchy temporal function and split the Dirac operator along this foliation. This gives rise to a family of elliptic operators each acting on spinors of the spin bundle over the corresponding timeslice. The theory of elliptic operators then ensures that we can find families of non-local boundary conditions with respect to this family of operators. Proceeding, we use such a family of boundary conditions to define a Lorentzian boundary condition on the whole timelike boundary. By analyzing the properties of the Lorentzian boundary conditions, we then find sufficient conditions on the family of non-local boundary conditions that lead to the well-posedness of the corresponding Cauchy problems. The well-posedness itself will then be proven by using classical tools including energy estimates and approximation by solutions of the regularized problems. Moreover, we use this theory to construct explicit boundary conditions for the Lorentzian Dirac operator. More precisely, we will discuss two examples of boundary conditions - the analogue of the Atiyah-Patodi-Singer and the chirality conditions, respectively, in our setting. For doing this, we will have a closer look at the theory of non-local boundary conditions for elliptic operators and analyze the requirements on the family of non-local boundary conditions for these specific examples.}, language = {en} } @phdthesis{Etzold2021, author = {Etzold, Heiko}, title = {Neue Zug{\"a}nge zum Winkelbegriff}, doi = {10.25932/publishup-50418}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504187}, school = {Universit{\"a}t Potsdam}, pages = {300}, year = {2021}, abstract = {Die Vielf{\"a}ltigkeit des Winkelbegriffs ist gleichermaßen spannend wie herausfordernd in Hinblick auf seine Zug{\"a}nge im Mathematikunterricht der Schule. Ausgehend von verschiedenen Vorstellungen zum Winkelbegriff wird in dieser Arbeit ein Lehrgang zur Vermittlung des Winkelbegriffs entwickelt und letztlich in konkrete Umsetzungen f{\"u}r den Schulunterricht {\"u}berf{\"u}hrt. Dabei erfolgt zun{\"a}chst eine stoffdidaktische Auseinandersetzung mit dem Winkelbegriff, die von einer informationstheoretischen Winkeldefinition begleitet wird. In dieser wird eine Definition f{\"u}r den Winkelbegriff unter der Fragestellung entwickelt, welche Informationen man {\"u}ber einen Winkel ben{\"o}tigt, um ihn beschreiben zu k{\"o}nnen. So k{\"o}nnen die in der fachdidaktischen Literatur auftretenden Winkelvorstellungen aus fachmathematischer Perspektive erneut abgeleitet und validiert werden. Parallel dazu wird ein Verfahren beschrieben, wie Winkel - auch unter dynamischen Aspekten - informationstechnisch verarbeitet werden k{\"o}nnen, so dass Schlussfolgerungen aus der informationstheoretischen Winkeldefinition beispielsweise in dynamischen Geometriesystemen zur Verf{\"u}gung stehen. 
Unter dem Gesichtspunkt, wie eine Abstraktion des Winkelbegriffs im Mathematikunterricht vonstatten gehen kann, werden die Grundvorstellungsidee sowie die Lehrstrategie des Aufsteigens vom Abstrakten zum Konkreten miteinander in Beziehung gesetzt. Aus der Verkn{\"u}pfung der beiden Theorien wird ein grunds{\"a}tzlicher Weg abgeleitet, wie im Rahmen der Lehrstrategie eine Ausgangsabstraktion zu einzelnen Winkelaspekten aufgebaut werden kann, was die Generierung von Grundvorstellungen zu den Bestandteilen des jeweiligen Winkelaspekts und zum Operieren mit diesen Begriffsbestandteilen erm{\"o}glichen soll. Hierf{\"u}r wird die Lehrstrategie angepasst, um insbesondere den {\"U}bergang von Winkelsituationen zu Winkelkontexten zu realisieren. Explizit f{\"u}r den Aspekt des Winkelfeldes werden, anhand der Untersuchung der Sichtfelder von Tieren, Lernhandlungen und Forderungen an ein Lernmodell beschrieben, die Sch{\"u}lerinnen und Sch{\"u}ler bei der Begriffsaneignung unterst{\"u}tzen. Die T{\"a}tigkeitstheorie, der die genannte Lehrstrategie zuzuordnen ist, zieht sich als roter Faden durch die weitere Arbeit, wenn nun theoriebasiert Designprinzipien generiert werden, die in die Entwicklung einer interaktiven Lernumgebung m{\"u}nden. Hierzu wird u. a. das Modell der Artifact-Centric Activity Theory genutzt, das das Beziehungsgef{\"u}ge aus Sch{\"u}lerinnen und Sch{\"u}lern, dem mathematischen Gegenstand und einer zu entwickelnden App als vermittelndes Medium beschreibt, wobei der Einsatz der App im Unterrichtskontext sowie deren regelgeleitete Entwicklung Bestandteil des Modells sind. Gem{\"a}ß dem Ansatz der Fachdidaktischen Entwicklungsforschung wird die Lernumgebung anschließend in mehreren Zyklen erprobt, evaluiert und {\"u}berarbeitet. Dabei wird ein qualitatives Setting angewandt, das sich der Semiotischen Vermittlung bedient und untersucht, inwiefern sich die Qualit{\"a}t der von den Sch{\"u}lerinnen und Sch{\"u}lern gezeigten Lernhandlungen durch die Designprinzipien und deren Umsetzung erkl{\"a}ren l{\"a}sst. Am Ende der Arbeit stehen eine finale Version der Designprinzipien und eine sich daraus ergebende Lernumgebung zur Einf{\"u}hrung des Winkelfeldbegriffs in der vierten Klassenstufe.}, language = {de} } @phdthesis{Wichitsanguan2016, author = {Wichitsa-nguan, Korakot}, title = {Modifications and extensions of the logistic regression and Cox model}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90033}, school = {Universit{\"a}t Potsdam}, pages = {x, 131}, year = {2016}, abstract = {In many statistical applications, the aim is to model the relationship between covariates and some outcomes. A choice of the appropriate model depends on the outcome and the research objectives, such as linear models for continuous outcomes, logistic models for binary outcomes and the Cox model for time-to-event data. In epidemiological, medical, biological, societal and economic studies, the logistic regression is widely used to describe the relationship between a response variable as binary outcome and explanatory variables as a set of covariates. However, epidemiologic cohort studies are quite expensive regarding data management since following up a large number of individuals takes long time. Therefore, the case-cohort design is applied to reduce cost and time for data collection. The case-cohort sampling collects a small random sample from the entire cohort, which is called subcohort. 
The advantage of this design is that the covariate and follow-up data are recorded only on the subcohort and all cases (all members of the cohort who develop the event of interest during the follow-up process). In this thesis, we investigate the estimation in the logistic model for the case-cohort design. First, a model with a binary response and a binary covariate is considered. The maximum likelihood estimator (MLE) is described and its asymptotic properties are established. An estimator for the asymptotic variance of the estimator based on the maximum likelihood approach is proposed; this estimator differs slightly from the estimator introduced by Prentice (1986). Simulation results for several proportions of the subcohort show that the proposed estimator gives lower empirical bias and empirical variance than Prentice's estimator. Then the MLE in the logistic regression with discrete covariate under case-cohort design is studied. Here the approach of the binary covariate model is extended. Proving asymptotic normality of estimators, standard errors for the estimators can be derived. The simulation study demonstrates the estimation procedure of the logistic regression model with a one-dimensional discrete covariate. Simulation results for several proportions of the subcohort and different choices of the underlying parameters indicate that the estimator developed here performs reasonably well. Moreover, the comparison between theoretical values and simulation results of the asymptotic variance of the estimator is presented. Clearly, the logistic regression is sufficient when the binary outcome is available for all subjects and refers to a fixed time interval. Nevertheless, in practice, the observations in clinical trials are frequently collected for different time periods and subjects may drop out or relapse from other causes during follow-up. Hence, the logistic regression is not appropriate for incomplete follow-up data; for example, an individual drops out of the study before the end of data collection or an individual has not experienced the event of interest for the duration of the study. These observations are called censored observations. Survival analysis is necessary to solve these problems. Moreover, the time to the occurrence of the event of interest is taken into account. The Cox model, which can effectively handle censored data, has been widely used in survival analysis. Cox (1972) proposed the model which is focused on the hazard function. The Cox model is assumed to be λ(t|x) = λ0(t) exp(β^T x), where λ0(t) is an unspecified baseline hazard at time t, x is the vector of covariates and β is a p-dimensional vector of coefficients. In this thesis, the Cox model is considered from the viewpoint of experimental design. The estimability of the parameter β0 in the Cox model, where β0 denotes the true value of β, and the choice of optimal covariates are investigated. We give new representations of the observed information matrix I_n(β) and extend results for the Cox model of Andersen and Gill (1982). In this way conditions for the estimability of β0 are formulated. Under some regularity conditions, Σ is the inverse of the asymptotic variance matrix of the MPLE of β0 in the Cox model and then some properties of the asymptotic variance matrix of the MPLE are highlighted. Based on the results of asymptotic estimability, the calculation of locally optimal covariates is considered and shown in examples. In a sensitivity analysis, the efficiency of given covariates is calculated. 
For neighborhoods of the exponential models, the efficiencies are then determined. It turns out that for fixed parameters β0, the efficiencies do not change very much for different baseline hazard functions. Some proposals for applicable optimal covariates and a calculation procedure for finding optimal covariates are discussed. Furthermore, the extension of the Cox model where time-dependent coefficients are allowed is investigated. In this situation, the maximum local partial likelihood estimator for estimating the coefficient function β(·) is described. Based on this estimator, we formulate a new test procedure for testing whether a one-dimensional coefficient function β(·) has a prespecified parametric form, say β(·; ϑ). The score function derived from the local constant partial likelihood function at d distinct grid points is considered. It is shown that the distribution of the properly standardized quadratic form of this d-dimensional vector under the null hypothesis tends to a Chi-squared distribution. Moreover, the limit statement remains true when replacing the unknown ϑ0 by the MPLE in the hypothetical model, and an asymptotic α-test is given by the quantiles or p-values of the limiting Chi-squared distribution. Finally, we propose a bootstrap version of this test. The bootstrap test is only defined for the special case of testing whether the coefficient function is constant. A simulation study illustrates the behavior of the bootstrap test under the null hypothesis and a special alternative. It gives quite good results for the chosen underlying model. References: P. K. Andersen and R. D. Gill. Cox's regression model for counting processes: a large sample study. Ann. Statist., 10(4):1100-1120, 1982. D. R. Cox. Regression models and life-tables. J. Roy. Statist. Soc. Ser. B, 34:187-220, 1972. R. L. Prentice. A case-cohort design for epidemiologic cohort studies and disease prevention trials. Biometrika, 73(1):1-11, 1986.}, language = {en} } @phdthesis{Jankowski2004, author = {Jankowski, {\L}ukasz}, title = {Modelling and simulation of light propagation in non-aged and aged step-index polymer optical fibres}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001649}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Kunststofflichtwellenleiter (POFs) stellen ein verh{\"a}ltnism{\"a}ßig neues Medium zur optischen Datenkommunikation {\"u}ber kurze Strecken dar. W{\"a}hrend ihrer Einsatzdauer unterliegen POFs unterschiedlichen Arten von Umweltbeanspruchungen, haupts{\"a}chlich durch hohe Temperatur, hohe Feuchtigkeit und mechanische Belastungen. Zahlreiche experimentelle Forschungen besch{\"a}ftigten sich mit der standardisierten Pr{\"u}fung der Zuverl{\"a}ssigkeit von im Handel erh{\"a}ltlichen Fasern. Jedoch gab es bisher wenig Erfolg bei der Bem{\"u}hung, zwei grundlegende optische Erscheinungen, Absorption und Streuung, die die Lichtausbreitung in Fasern stark beeinflussen, zu verstehen und praktisch zu modellieren: Diese beiden Effekte beschreiben nicht nur die Qualit{\"a}t neuer Fasern, sondern sie werden auch stark durch den Alterungsprozess beeinflusst. Der Hauptzweck dieser Doktorarbeit war es, ein praktisch verwendbares und theoretisch gut fundiertes Modell der Lichtausbreitung in nicht gealterten und gealterten POFs zu entwickeln und es durch optische Experimente zu verifizieren. Dabei wurden anwendungsorientierte Aspekte mit theoretischer POF-Modellierung kombiniert. 
Die Arbeit enth{\"a}lt die erste bekannte Anwendung der Wellenanalyse zur Untersuchung der winkelabh{\"a}ngigen Eigenschaften der Streuung in Lichtwellenleitern. F{\"u}r die praktischen Experimente wurden mehrere POF-Proben unterschiedlicher Hersteller k{\"u}nstlich gealtert, indem sie bis 4500 Stunden bei 100 °C gelagert wurden (ohne Feuchtekontrolle). Die Parameter der jeweiligen Simulationen wurden mittels einer systematischen Optimierung an die gemessen optischen Eigenschaften der gealterten Proben angeglichen. Die Resultate deuten an, dass der {\"U}bertragungsverlust der gealterten Fasern in den ersten Tagen und Wochen der Alterung am st{\"a}rksten durch eine wesentliche physikalische Verschlechterung der Kern-Mantel-Grenzfl{\"a}che verursacht wird. Chemische Effekte des Alterungsprozesses scheinen im Faserkernmaterial zuerst nach einigen Monaten aufzutreten.}, language = {en} } @phdthesis{Knoechel2019, author = {Kn{\"o}chel, Jane}, title = {Model reduction of mechanism-based pharmacodynamic models and its link to classical drug effect models}, doi = {10.25932/publishup-44059}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440598}, school = {Universit{\"a}t Potsdam}, pages = {vii, 147}, year = {2019}, abstract = {Continuous insight into biological processes has led to the development of large-scale, mechanistic systems biology models of pharmacologically relevant networks. While these models are typically designed to study the impact of diverse stimuli or perturbations on multiple system variables, the focus in pharmacological research is often on a specific input, e.g., the dose of a drug, and a specific output related to the drug effect or response in terms of some surrogate marker. To study a chosen input-output pair, the complexity of the interactions as well as the size of the models hinders easy access and understanding of the details of the input-output relationship. The objective of this thesis is the development of a mathematical approach, in specific a model reduction technique, that allows (i) to quantify the importance of the different state variables for a given input-output relationship, and (ii) to reduce the dynamics to its essential features -- allowing for a physiological interpretation of state variables as well as parameter estimation in the statistical analysis of clinical data. We develop a model reduction technique using a control theoretic setting by first defining a novel type of time-limited controllability and observability gramians for nonlinear systems. We then show the superiority of the time-limited generalised gramians for nonlinear systems in the context of balanced truncation for a benchmark system from control theory. The concept of time-limited controllability and observability gramians is subsequently used to introduce a state and time-dependent quantity called the input-response (ir) index that quantifies the importance of state variables for a given input-response relationship at a particular time. We subsequently link our approach to sensitivity analysis, thus, enabling for the first time the use of sensitivity coefficients for state space reduction. The sensitivity based ir-indices are given as a product of two sensitivity coefficients. This allows not only for a computational more efficient calculation but also for a clear distinction of the extent to which the input impacts a state variable and the extent to which a state variable impacts the output. 
The ir-indices give insight into the coordinated action of specific state variables for a chosen input-response relationship. Our model reduction technique results in reduced models that still allow for a mechanistic interpretation in terms of the quantities/state variables of the original system, which is a key requirement in the field of systems pharmacology and systems biology and distinguishes the reduced models from so-called empirical drug effect models. The ir-indices are explicitly defined with respect to a reference trajectory and are thereby dependent on the initial state (this is an important feature of the measure). This is demonstrated for an example from the field of systems pharmacology, showing that the reduced models are very informative in their ability to detect (genetic) deficiencies in certain physiological entities. Comparing our novel model reduction technique to the already existing techniques shows its superiority. The novel input-response index as a measure of the importance of state variables provides a powerful tool for understanding the complex dynamics of large-scale systems in the context of a specific drug-response relationship. Furthermore, the indices provide a means for a very efficient model order reduction and, thus, an important step towards translating insight from biological processes incorporated in detailed systems pharmacology models into the population analysis of clinical data.}, language = {en} } @phdthesis{Friedrich2020, author = {Friedrich, Alexander}, title = {Minimizers of generalized Willmore energies and applications in general relativity}, doi = {10.25932/publishup-48142}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-481423}, school = {Universit{\"a}t Potsdam}, pages = {100}, year = {2020}, abstract = {Das Willmore Funktional ist eine Funktion, die jeder Fl{\"a}che in einer Riemannschen Mannigfaltigkeit ihre totale mittlere Kr{\"u}mmung zuweist. Ein klassisches Problem der Differentialgeometrie ist es, geschlossene (kompakt und ohne Rand) Fl{\"a}chen zu finden, die das Willmore Funktional minimieren beziehungsweise die kritische Punkte des Willmore Funktionals sind. In dieser Doktorarbeit entwickeln wir ein Konzept von verallgemeinerten Willmore Funktionalen f{\"u}r Fl{\"a}chen in Riemannschen Mannigfaltigkeiten, wobei wir uns von physikalischen Modellen leiten lassen. Insbesondere sind hier die Hawking Energie der allgemeinen Relativit{\"a}tstheorie und die Biegungsenergie von d{\"u}nnen Membranen zu nennen. F{\"u}r diese verallgemeinerten Willmore Funktionale beweisen wir die Existenz von Minimierern mit vorgeschriebenem Fl{\"a}cheninhalt in einer geeigneten Klasse von verallgemeinerten Fl{\"a}chen. Insbesondere konstruieren wir Minimierer der oben erw{\"a}hnten Biegungsenergie mit vorgeschriebenem Fl{\"a}cheninhalt und vorgeschriebenem eingeschlossenem Volumen. Außerdem beweisen wir, dass kritische Punkte von verallgemeinerten Willmore Funktionalen mit vorgeschriebenem Fl{\"a}cheninhalt abseits endlich vieler Punkte glatt sind. Dabei st{\"u}tzen wir uns, wie auch im Folgenden, auf die bestehende Theorie f{\"u}r das Willmore Funktional. An diese allgemeinen Resultate schließen wir eine detailliertere Analyse der Hawking Energie an. Im Kontext der allgemeinen Relativit{\"a}tstheorie beschreibt die Umgebungsmannigfaltigkeit den Raum zu einem Zeitpunkt. Daher sind wir an dem Wechselspiel zwischen der Hawking Energie und der umgebenden Mannigfaltigkeit interessiert. 
Wir charakterisieren Punkte in der umgebenden Mannigfaltigkeit, f{\"u}r die es in jeder Umgebung eine kritische Fl{\"a}che mit vorgeschriebenem, kleinem Fl{\"a}cheninhalt gibt. Diese Punkte werden als Konzentrationspunkte der Hawking Energie interpretiert. Außerdem berechnen wir eine Entwicklung der Hawking Energie auf kleinen, runden Sph{\"a}ren. Dadurch k{\"o}nnen wir eine Art Energiedichte der Hawking Energie identifizieren. Hierbei ist anzumerken, dass unsere Resultate im Kontrast zu Ergebnissen in der Literatur stehen. Dort wurde berechnet, dass die Entwicklung der Hawking Energie auf Sph{\"a}ren im Lichtkegel eines Punktes der umgebenden Mannigfaltigkeit in f{\"u}hrender Ordnung proportional zur klassischen Energiedichte der allgemeinen Relativit{\"a}tstheorie ist. Zu diesem Zeitpunkt ist nicht klar, wie diese Diskrepanz zu begr{\"u}nden ist. Ferner betrachten wir asymptotisch Schwarzschild Mannigfaltigkeiten. Sie sind ein Spezialfall von asymptotisch flachen Mannigfaltigkeiten, welche in der allgemeinen Relativit{\"a}tstheorie als Modelle f{\"u}r isolierte Systeme dienen. Die Schwarzschild Raumzeit selbst ist eine rotationssymmetrische Raumzeit, die ein schwarzes Loch beschreibt. In diesen asymptotisch Schwarzschild Mannigfaltigkeiten konstruieren wir eine Bl{\"a}tterung des {\"a}ußeren Bereiches durch kritische Fl{\"a}chen der Hawking Energie mit vorgeschriebenem Fl{\"a}cheninhalt. Diese Bl{\"a}tterung kann in einem verallgemeinerten Sinne als Schwerpunkt des isolierten Systems betrachtet werden. Außerdem zeigen wir, dass die Hawking Energie entlang der Bl{\"a}tterung w{\"a}chst, je gr{\"o}ßer die Fl{\"a}chen werden.}, language = {en} } @phdthesis{Bettenbuehl2015, author = {Bettenb{\"u}hl, Mario}, title = {Microsaccades}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-122-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72622}, school = {Universit{\"a}t Potsdam}, pages = {iv, 126}, year = {2015}, abstract = {The first thing we do upon waking is open our eyes. Rotating them in our eye sockets, we scan our surroundings and collect the information into a picture in our head. Eye movements can be split into saccades and fixational eye movements, which occur when we attempt to fixate our gaze. The latter consist of microsaccades, drift and tremor. Before we even lift our eyelids, eye movements - such as saccades and microsaccades that let the eyes jump from one position to another - have partially been prepared in the brain stem. Saccades and microsaccades are often assumed to be generated by the same mechanisms. But how saccades and microsaccades can be classified according to shape has not yet been reported in a statistical manner. Only during the last decade has research put more effort into the investigation of microsaccades' properties and generation. Consequently, we are only beginning to understand the dynamic processes governing microsaccadic eye movements. Within this thesis, the dynamics governing the generation of microsaccades is assessed and a model for the underlying processes is developed. Eye movement trajectories from different experiments are used, recorded with a video-based eye tracking technique, and a novel method is proposed for the scale-invariant detection of saccades (events of large amplitude) and microsaccades (events of small amplitude). Using a time-frequency approach, the method is examined with different experiments and validated against simulated data. 
A shape model is suggested that allows for a simple estimation of saccade- and microsaccade related properties. For sequences of microsaccades, in this thesis a time-dynamic Markov model is proposed, with a memory horizon that changes over time and which can best describe sequences of microsaccades.}, language = {en} } @phdthesis{Samaras2016, author = {Samaras, Stefanos}, title = {Microphysical retrieval of non-spherical aerosol particles using regularized inversion of multi-wavelength lidar data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396528}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 190}, year = {2016}, abstract = {Numerous reports of relatively rapid climate changes over the past century make a clear case of the impact of aerosols and clouds, identified as sources of largest uncertainty in climate projections. Earth's radiation balance is altered by aerosols depending on their size, morphology and chemical composition. Competing effects in the atmosphere can be further studied by investigating the evolution of aerosol microphysical properties, which are the focus of the present work. The aerosol size distribution, the refractive index, and the single scattering albedo are commonly used such properties linked to aerosol type, and radiative forcing. Highly advanced lidars (light detection and ranging) have reduced aerosol monitoring and optical profiling into a routine process. Lidar data have been widely used to retrieve the size distribution through the inversion of the so-called Lorenz-Mie model (LMM). This model offers a reasonable treatment for spherically approximated particles, it no longer provides, though, a viable description for other naturally occurring arbitrarily shaped particles, such as dust particles. On the other hand, non-spherical geometries as simple as spheroids reproduce certain optical properties with enhanced accuracy. Motivated by this, we adapt the LMM to accommodate the spheroid-particle approximation introducing the notion of a two-dimensional (2D) shape-size distribution. Inverting only a few optical data points to retrieve the shape-size distribution is classified as a non-linear ill-posed problem. A brief mathematical analysis is presented which reveals the inherent tendency towards highly oscillatory solutions, explores the available options for a generalized solution through regularization methods and quantifies the ill-posedness. The latter will improve our understanding on the main cause fomenting instability in the produced solution spaces. The new approach facilitates the exploitation of additional lidar data points from depolarization measurements, associated with particle non-sphericity. However, the generalization of LMM vastly increases the complexity of the problem. The underlying theory for the calculation of the involved optical cross sections (T-matrix theory) is computationally so costly, that would limit a retrieval analysis to an unpractical point. Moreover the discretization of the model equation by a 2D collocation method, proposed in this work, involves double integrations which are further time consuming. We overcome these difficulties by using precalculated databases and a sophisticated retrieval software (SphInX: Spheroidal Inversion eXperiments) especially developed for our purposes, capable of performing multiple-dataset inversions and producing a wide range of microphysical retrieval outputs. Hybrid regularization in conjunction with minimization processes is used as a basis for our algorithms. 
Synthetic data retrievals are performed simulating various atmospheric scenarios in order to test the efficiency of different regularization methods. The gap in contemporary literature in providing full sets of uncertainties in a wide variety of numerical instances is of major concern here. For this, the most appropriate methods are identified through a thorough analysis on an overall-behavior basis regarding accuracy and stability. The general trend of the initial size distributions is captured in our numerical experiments and the reconstruction quality depends on data error level. Moreover, the need for more or less depolarization points is explored for the first time from the point of view of the microphysical retrieval. Finally, our approach is tested in various measurement cases giving further insight for future algorithm improvements.}, language = {en} } @phdthesis{Gopalakrishnan2016, author = {Gopalakrishnan, Sathej}, title = {Mathematical modelling of host-disease-drug interactions in HIV disease}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100100}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2016}, abstract = {The human immunodeficiency virus (HIV) has resisted nearly three decades of efforts targeting a cure. Sustained suppression of the virus has remained a challenge, mainly due to the remarkable evolutionary adaptation that the virus exhibits by the accumulation of drug-resistant mutations in its genome. Current therapeutic strategies aim at achieving and maintaining a low viral burden and typically involve multiple drugs. The choice of optimal combinations of these drugs is crucial, particularly in the background of treatment failure having occurred previously with certain other drugs. An understanding of the dynamics of viral mutant genotypes aids in the assessment of treatment failure with a certain drug combination, and exploring potential salvage treatment regimens. Mathematical models of viral dynamics have proved invaluable in understanding the viral life cycle and the impact of antiretroviral drugs. However, such models typically use simplified and coarse-grained mutation schemes, that curbs the extent of their application to drug-specific clinical mutation data, in order to assess potential next-line therapies. Statistical models of mutation accumulation have served well in dissecting mechanisms of resistance evolution by reconstructing mutation pathways under different drug-environments. While these models perform well in predicting treatment outcomes by statistical learning, they do not incorporate drug effect mechanistically. Additionally, due to an inherent lack of temporal features in such models, they are less informative on aspects such as predicting mutational abundance at treatment failure. This limits their application in analyzing the pharmacology of antiretroviral drugs, in particular, time-dependent characteristics of HIV therapy such as pharmacokinetics and pharmacodynamics, and also in understanding the impact of drug efficacy on mutation dynamics. In this thesis, we develop an integrated model of in vivo viral dynamics incorporating drug-specific mutation schemes learned from clinical data. Our combined modelling approach enables us to study the dynamics of different mutant genotypes and assess mutational abundance at virological failure. As an application of our model, we estimate in vivo fitness characteristics of viral mutants under different drug environments. 
Our approach also extends naturally to multiple-drug therapies. Further, we demonstrate the versatility of our model by showing how it can be modified to incorporate recently elucidated mechanisms of drug action including molecules that target host factors. Additionally, we address another important aspect in the clinical management of HIV disease, namely drug pharmacokinetics. It is clear that time-dependent changes in in vivo drug concentration could have an impact on the antiviral effect, and also influence decisions on dosing intervals. We present a framework that provides an integrated understanding of key characteristics of multiple-dosing regimens including drug accumulation ratios and half-lifes, and then explore the impact of drug pharmacokinetics on viral suppression. Finally, parameter identifiability in such nonlinear models of viral dynamics is always a concern, and we investigate techniques that alleviate this issue in our setting.}, language = {en} } @phdthesis{Schindler2023, author = {Schindler, Daniel}, title = {Mathematical modeling and simulation of protrusion-driven cell dynamics}, doi = {10.25932/publishup-61327}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-613275}, school = {Universit{\"a}t Potsdam}, pages = {VI, 161}, year = {2023}, abstract = {Amoeboid cell motility takes place in a variety of biomedical processes such as cancer metastasis, embryonic morphogenesis, and wound healing. In contrast to other forms of cell motility, it is mainly driven by substantial cell shape changes. Based on the interplay of explorative membrane protrusions at the front and a slower-acting membrane retraction at the rear, the cell moves in a crawling kind of way. Underlying these protrusions and retractions are multiple physiological processes resulting in changes of the cytoskeleton, a meshwork of different multi-functional proteins. The complexity and versatility of amoeboid cell motility raise the need for novel computational models based on a profound theoretical framework to analyze and simulate the dynamics of the cell shape. The objective of this thesis is the development of (i) a mathematical framework to describe contour dynamics in time and space, (ii) a computational model to infer expansion and retraction characteristics of individual cell tracks and to produce realistic contour dynamics, (iii) and a complementing Open Science approach to make the above methods fully accessible and easy to use. In this work, we mainly used single-cell recordings of the model organism Dictyostelium discoideum. Based on stacks of segmented microscopy images, we apply a Bayesian approach to obtain smooth representations of the cell membrane, so-called cell contours. We introduce a one-parameter family of regularized contour flows to track reference points on the contour (virtual markers) in time and space. This way, we define a coordinate system to visualize local geometric and dynamic quantities of individual contour dynamics in so-called kymograph plots. In particular, we introduce the local marker dispersion as a measure to identify membrane protrusions and retractions in a fully automated way. This mathematical framework is the basis of a novel contour dynamics model, which consists of three biophysiologically motivated components: one stochastic term, accounting for membrane protrusions, and two deterministic terms to control the shape and area of the contour, which account for membrane retractions. 
Our model provides a fully automated approach to infer protrusion and retraction characteristics from experimental cell tracks while being also capable of simulating realistic and qualitatively different contour dynamics. Furthermore, the model is used to classify two different locomotion types: the amoeboid and a so-called fan-shaped type. With the complementing Open Science approach, we ensure a high standard regarding the usability of our methods and the reproducibility of our research. In this context, we introduce our software publication named AmoePy, an open-source Python package to segment, analyze, and simulate amoeboid cell motility. Furthermore, we describe measures to improve its usability and extensibility, e.g., by detailed run instructions and an automatically generated source code documentation, and to ensure its functionality and stability, e.g., by automatic software tests, data validation, and a hierarchical package structure. The mathematical approaches of this work provide substantial improvements regarding the modeling and analysis of amoeboid cell motility. We deem the above methods, due to their generalized nature, to be of greater value for other scientific applications, e.g., varying organisms and experimental setups or the transition from unicellular to multicellular movement. Furthermore, we enable other researchers from different fields, i.e., mathematics, biophysics, and medicine, to apply our mathematical methods. By following Open Science standards, this work is of greater value for the cell migration community and a potential role model for other Open Science contributions.}, language = {en} } @phdthesis{Solms2017, author = {Solms, Alexander Maximilian}, title = {Integrating nonlinear mixed effects and physiologically-based modeling approaches for the analysis of repeated measurement studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397070}, school = {Universit{\"a}t Potsdam}, pages = {x, 141}, year = {2017}, abstract = {During the drug discovery \& development process, several phases encompassing a number of preclinical and clinical studies have to be successfully passed to demonstrate safety and efficacy of a new drug candidate. As part of these studies, the characterization of the drug's pharmacokinetics (PK) is an important aspect, since the PK is assumed to strongly impact safety and efficacy. To this end, drug concentrations are measured repeatedly over time in a study population. The objectives of such studies are to describe the typical PK time-course and the associated variability between subjects. Furthermore, underlying sources significantly contributing to this variability, e.g. the use of comedication, should be identified. The most commonly used statistical framework to analyse repeated measurement data is the nonlinear mixed effect (NLME) approach. At the same time, ample knowledge about the drug's properties already exists and has been accumulating during the discovery \& development process: Before any drug is tested in humans, detailed knowledge about the PK in different animal species has to be collected. This drug-specific knowledge and general knowledge about the species' physiology is exploited in mechanistic physiological based PK (PBPK) modeling approaches -it is, however, ignored in the classical NLME modeling approach. Mechanistic physiological based models aim to incorporate relevant and known physiological processes which contribute to the overlying process of interest. 
In comparison to data-driven models they are usually more complex from a mathematical perspective. For example, in many situations the number of model parameters exceeds the number of measurements, and thus reliable parameter estimation becomes more complex and partly impossible. As a consequence, the integration of powerful mathematical estimation approaches like the NLME modeling approach - which is widely used in data-driven modeling - and the mechanistic modeling approach is not well established; the observed data is rather used as a confirmatory input instead of an input that informs and builds the model. Another aggravating circumstance for an integrated approach is the inaccessibility of the details of the NLME methodology, which would be needed to adapt these approaches to the specifics and needs of mechanistic modeling. Despite the fact that the NLME modeling approach has existed for several decades, the details of the mathematical methodology are scattered across a wide range of literature and a comprehensive, rigorous derivation is lacking. Available literature usually only covers selected parts of the mathematical methodology. Sometimes, important steps are not described or are only heuristically motivated, e.g. the iterative algorithm to finally determine the parameter estimates. Thus, in the present thesis the mathematical methodology of NLME modeling is systematically described and complemented into a comprehensive description, comprising the common theme from ideas and motivation to the final parameter estimation. Therein, new insights for the interpretation of different approximation methods used in the context of the NLME modeling approach are given and illustrated; furthermore, similarities and differences between them are outlined. Based on these findings, an expectation-maximization (EM) algorithm to determine estimates of an NLME model is described. Using the EM algorithm and the lumping methodology by Pilari2010, a new approach on how PBPK and NLME modeling can be combined is presented and exemplified for the antibiotic levofloxacin. Therein, the lumping identifies which processes are informed by the available data, and the respective model reduction improves the robustness in parameter estimation. Furthermore, it is shown how a priori known factors influencing the variability and a priori known unexplained variability are incorporated to further mechanistically drive the model development. Consequently, correlation between parameters and between covariates is automatically accounted for due to the mechanistic derivation of the lumping and the covariate relationships. A useful feature of PBPK models compared to classical data-driven PK models is the possibility to predict drug concentrations within all organs and tissues in the body. Thus, the resulting PBPK model for levofloxacin is used to predict drug concentrations and their variability within soft tissues, which are the site of action for levofloxacin. These predictions are compared with data of muscle and adipose tissue obtained by microdialysis, which is an invasive technique to measure a proportion of drug in the tissue, allowing one to approximate the concentrations in the interstitial fluid of tissues. Because comparing human in vivo tissue PK with PBPK predictions is so far not established, a new conceptual framework is derived. The comparison of PBPK model predictions and microdialysis measurements shows adequate agreement and reveals further strengths of the presented new approach. 
We demonstrated how mechanistic PBPK models, which are usually developed in the early stage of drug development, can be used as basis for model building in the analysis of later stages, i.e. in clinical studies. As a consequence, the extensively collected and accumulated knowledge about species and drug are utilized and updated with specific volunteer or patient data. The NLME approach combined with mechanistic modeling reveals new insights for the mechanistic model, for example identification and quantification of variability in mechanistic processes. This represents a further contribution to the learn \& confirm paradigm across different stages of drug development. Finally, the applicability of mechanism--driven model development is demonstrated on an example from the field of Quantitative Psycholinguistics to analyse repeated eye movement data. Our approach gives new insight into the interpretation of these experiments and the processes behind.}, language = {en} } @phdthesis{Busaman2006, author = {Busaman, Saofee}, title = {Hyperequational theory for partial algebras}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12048}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Our work goes in two directions. At first we want to transfer definitions, concepts and results of the theory of hyperidentities and solid varieties from the total to the partial case. (1) We prove that the operators chi^A_RNF and chi^E_RNF are only monotone and additive and we show that the sets of all fixed points of these operators are characterized only by three instead of four equivalent conditions for the case of closure operators. (2) We prove that V is n - SF-solid iff clone^SF V is free with respect to itself, freely generated by the independent set {[fi(x_1, . . . , x_n)]Id^SF_n V | i \in I}. (3) We prove that if V is n-fluid and ~V |P(V ) =~V -iso |P(V ) then V is kunsolid for k >= n (where P(V ) is the set of all V -proper hypersubstitutions of type \tau ). (4) We prove that a strong M-hyperquasi-equational theory is characterized by four equivalent conditions. The second direction of our work is to follow ideas which are typical for the partial case. (1) We characterize all minimal partial clones which are strongly solidifyable. (2)We define the operator Chi^A_Ph where Ph is a monoid of regular partial hypersubstitutions.Using this concept, we define the concept of a Phyp_R(\tau )-solid strong regular variety of partial algebras and we prove that a PHyp_R(\tau )-solid strong regular variety satisfies four equivalent conditions.}, language = {en} } @phdthesis{Reinhardt2020, author = {Reinhardt, Maria}, title = {Hybrid filters and multi-scale models}, doi = {10.25932/publishup-47435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474356}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 102}, year = {2020}, abstract = {This thesis is concerned with Data Assimilation, the process of combining model predictions with observations. So called filters are of special interest. One is inter- ested in computing the probability distribution of the state of a physical process in the future, given (possibly) imperfect measurements. This is done using Bayes' rule. The first part focuses on hybrid filters, that bridge between the two main groups of filters: ensemble Kalman filters (EnKF) and particle filters. The first are a group of very stable and computationally cheap algorithms, but they request certain strong assumptions. 
Particle filters on the other hand are more generally applicable, but computationally expensive and as such not always suitable for high-dimensional systems. Therefore there is a need to combine both groups to benefit from the advantages of each. This can be achieved by splitting the likelihood function when assimilating a new observation, treating one part of it with an EnKF and the other part with a particle filter. The second part of this thesis deals with the application of Data Assimilation to multi-scale models and the problems that arise from that. One of the main areas of application for Data Assimilation techniques is predicting the development of oceans and the atmosphere. These processes involve several scales and often balance relations between the state variables. The use of Data Assimilation procedures most often violates relations of that kind, which eventually leads to unrealistic and non-physical predictions of the future development of the process. This work discusses the inclusion of a post-processing step after each assimilation step, in which a minimisation problem is solved that penalises the imbalance. This method is tested on four different models, two Hamiltonian systems and two spatially extended models, which adds even more difficulties.}, language = {en} } @phdthesis{Santilli2017, author = {Santilli, Mario}, title = {Higher order rectifiability in Euclidean space}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403632}, school = {Universit{\"a}t Potsdam}, pages = {45}, year = {2017}, abstract = {The first main goal of this thesis is to develop a concept of approximate differentiability of higher order for subsets of the Euclidean space that allows one to characterize higher order rectifiable sets, extending, in a certain sense, well-known facts for functions. We emphasize that for every subset A of the Euclidean space and for every integer k ≥ 2 we introduce the approximate differential of order k of A and we prove it is a Borel map whose domain is a (possibly empty) Borel set. This concept could be helpful to deal with higher order rectifiable sets in applications. The other goal is to extend to general closed sets a well-known theorem of Alberti on the second order rectifiability properties of the boundary of convex bodies. The Alberti theorem provides a stratification of second order rectifiable subsets of the boundary of a convex body based on the dimension of the (convex) normal cone. Considering a suitable generalization of this normal cone for general closed subsets of the Euclidean space and employing some results from the first part we can prove that the same stratification exists for every closed set.}, language = {en} } @phdthesis{Lewandowski2019, author = {Lewandowski, Max}, title = {Hadamard states for bosonic quantum field theory on globally hyperbolic spacetimes}, doi = {10.25932/publishup-43938}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439381}, school = {Universit{\"a}t Potsdam}, pages = {v, 69}, year = {2019}, abstract = {Quantenfeldtheorie auf gekr{\"u}mmten Raumzeiten ist eine semiklassische N{\"a}herung einer Quantentheorie der Gravitation, im Rahmen derer ein Quantenfeld unter dem Einfluss eines klassisch modellierten Gravitationsfeldes, also einer gekr{\"u}mmten Raumzeit, beschrieben wird. Eine der bemerkenswertesten Vorhersagen dieses Ansatzes ist die Erzeugung von Teilchen durch die gekr{\"u}mmte Raumzeit selbst, wie zum Beispiel durch Hawkings Verdampfen schwarzer L{\"o}cher und den Unruh Effekt. 
Andererseits deuten diese Aspekte bereits an, dass fundamentale Grundpfeiler der Theorie auf dem Minkowskiraum, insbesondere ein ausgezeichneter Vakuumzustand und damit verbunden der Teilchenbegriff, f{\"u}r allgemeine gekr{\"u}mmte Raumzeiten keine sinnvolle Entsprechung besitzen. Gleichermaßen ben{\"o}tigen wir eine alternative Implementierung von Kovarianz in die Theorie, da gekr{\"u}mmte Raumzeiten im Allgemeinen keine nicht-triviale globale Symmetrie aufweisen. Letztere Problematik konnte im Rahmen lokal-kovarianter Quantenfeldtheorie gel{\"o}st werden, wohingegen die Abwesenheit entsprechender Konzepte f{\"u}r Vakuum und Teilchen in diesem allgemeinen Fall inzwischen sogar in Form von no-go-Aussagen manifestiert wurde. Beim algebraischen Ansatz f{\"u}r eine Quantenfeldtheorie werden zun{\"a}chst Observablen eingef{\"u}hrt und erst anschließend Zust{\"a}nde via Zuordnung von Erwartungswerten. Obwohl die Observablen unter physikalischen Gesichtspunkten konstruiert werden, existiert dennoch eine große Anzahl von m{\"o}glichen Zust{\"a}nden, von denen viele, aus physikalischen Blickwinkeln betrachtet, nicht sinnvoll sind. Dieses Konzept von Zust{\"a}nden ist daher noch zu allgemein und bedarf weiterer physikalisch motivierter Einschr{\"a}nkungen. Beispielsweise ist es nat{\"u}rlich, sich im Falle freier Quantenfeldtheorien mit linearen Feldgleichungen auf quasifreie Zust{\"a}nde zu konzentrieren. Dar{\"u}ber hinaus ist die Renormierung von Erwartungswerten f{\"u}r Produkte von Feldern von zentraler Bedeutung. Dies betrifft insbesondere den Energie-Impuls-Tensor, dessen Erwartungswert durch distributionelle Bil{\"o}sungen der Feldgleichungen gegeben ist. Tats{\"a}chlich liefert J. Hadamard Theorie hyperbolischer Differentialgleichungen Bil{\"o}sungen mit festem singul{\"a}ren Anteil, so dass ein geeignetes Renormierungsverfahren definiert werden kann. Die sogenannte Hadamard-Bedingung an Bidistributionen steht f{\"u}r die Forderung einer solchen Singularit{\"a}tenstruktur und sie hat sich etabliert als nat{\"u}rliche Verallgemeinerung der f{\"u}r flache Raumzeiten formulierten Spektralbedingung. Seit Radzikowskis wegweisenden Resultaten l{\"a}sst sie sich außerdem lokal ausdr{\"u}cken, n{\"a}mlich als eine Bedingung an die Wellenfrontenmenge der Bil{\"o}sung. Diese Formulierung schl{\"a}gt eine Br{\"u}cke zu der von Duistermaat und H{\"o}rmander entwickelten mikrolokalen Analysis, die seitdem bei der {\"U}berpr{\"u}fung der Hadamard-Bedingung sowie der Konstruktion von Hadamard Zust{\"a}nden vielfach Verwendung findet und rasante Fortschritte auf diesem Gebiet ausgel{\"o}st hat. Obwohl unverzichtbar f{\"u}r die Analyse der Charakteristiken von Operatoren und ihrer Parametrizen sind die Methoden und Aussagen der mikrolokalen Analysis ungeeignet f{\"u}r die Analyse von nicht-singul{\"a}ren Strukturen und zentrale Aussagen sind typischerweise bis auf glatte Anteile formuliert. Beispielsweise lassen sich aus Radzikowskis Resultaten nahezu direkt Existenzaussagen und sogar ein konkretes Konstruktionsschema f{\"u}r Hadamard Zust{\"a}nde ableiten, die {\"u}brigen Eigenschaften (Bil{\"o}sung, Kausalit{\"a}t, Positivit{\"a}t) k{\"o}nnen jedoch auf diesem Wege nur modulo glatte Funktionen gezeigt werden. Es ist das Ziel dieser Dissertation, diesen Ansatz f{\"u}r lineare Wellenoperatoren auf Schnitten in Vektorb{\"u}ndeln {\"u}ber global-hyperbolischen Lorentz-Mannigfaltigkeiten zu vollenden und, ausgehend von einer lokalen Hadamard Reihe, Hadamard Zust{\"a}nde zu konstruieren. 
Beruhend auf Wightmans L{\"o}sung f{\"u}r die d'Alembert-Gleichung auf dem Minkowski-Raum und der Herleitung der avancierten und retardierten Fundamentall{\"o}sung konstruieren wir lokal Parametrizen in Form von Hadamard-Reihen und f{\"u}gen sie zu globalen Bil{\"o}sungen zusammen. Diese besitzen dann die Hadamard-Eigenschaft und wir zeigen anschließend, dass glatte Bischnitte existieren, die addiert werden k{\"o}nnen, so dass die verbleibenden Bedingungen erf{\"u}llt sind.}, language = {en} } @phdthesis{Kollosche2014, author = {Kollosche, David}, title = {Gesellschaft, Mathematik und Unterricht : ein Beitrag zum soziologisch-kritischen Verst{\"a}ndnis der gesellschaftlichen Funktionen des Mathematikunterrichts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70726}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Die vorliegende Studie untersucht die gesellschaftliche Rolle des gegenw{\"a}rtigen Mathematikunterrichts an deutschen allgemeinbildenden Schulen aus einer soziologisch-kritischen Perspektive. Im Zentrum des Interesses steht die durch den Mathematikunterricht erfahrene Sozialisation. Die Studie umfasst unter anderem eine Literaturdiskussion, die Ausarbeitung eines soziologischen Rahmens auf der Grundlage des Werks von Michel Foucault und zwei Teilstudien zur Soziologie der Logik und des Rechnens. Abschließend werden Dispositive des Mathematischen beschrieben, die darlegen, in welcher Art und mit welcher pers{\"o}nlichen und gesellschaftlichen Folgen der gegenw{\"a}rtige Mathematikunterricht eine spezielle Geisteshaltung etabliert.}, language = {de} } @phdthesis{Ziese2014, author = {Ziese, Ramona}, title = {Geometric electroelasticity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72504}, school = {Universit{\"a}t Potsdam}, pages = {vi, 113}, year = {2014}, abstract = {In this work a differential geometric formulation of the theory of electroelasticity is developed which also includes thermal and magnetic influences. We study the motion of bodies consisting of an elastic material that are deformed by the influence of mechanical forces, heat and an external electromagnetic field. To this end physical balance laws (conservation of mass, balance of momentum, angular momentum and energy) are established. These provide an equation that describes the motion of the body during the deformation. Here the body and the surrounding space are modeled as Riemannian manifolds, and we allow the body to have a lower dimension than the surrounding space. In this way one is not (as usual) restricted to the description of the deformation of three-dimensional bodies in a three-dimensional space, but one can also describe the deformation of membranes and the deformation in a curved space. Moreover, we formulate so-called constitutive relations that encode the properties of the material used. Balance of energy as a scalar law can easily be formulated on a Riemannian manifold. The remaining balance laws are then obtained by demanding that balance of energy is invariant under the action of arbitrary diffeomorphisms on the surrounding space. This generalizes a result by Marsden and Hughes that pertains to bodies that have the same dimension as the surrounding space and does not allow the presence of electromagnetic fields. Usually, in works on electroelasticity the entropy inequality is used to decide which otherwise allowed deformations are physically admissible and which are not. 
It is also employed to derive restrictions on the possible forms of constitutive relations describing the material. Unfortunately, the opinions on the physically correct statement of the entropy inequality diverge when electromagnetic fields are present. Moreover, it is unclear how to formulate the entropy inequality in the case of a membrane that is subjected to an electromagnetic field. Thus, we show that one can replace the use of the entropy inequality by the demand that for a given process balance of energy is invariant under the action of arbitrary diffeomorphisms on the surrounding space and under linear rescalings of the temperature. On the one hand, this demand also yields the desired restrictions on the form of the constitutive relations. On the other hand, it requires much weaker assumptions than the arguments in the physics literature that employ the entropy inequality. Again, our result generalizes a theorem of Marsden and Hughes. This time, our result is, like theirs, only valid for bodies that have the same dimension as the surrounding space.}, language = {en} } @phdthesis{Rafler2009, author = {Rafler, Mathias}, title = {Gaussian loop- and P{\´o}lya processes : a point process approach}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-029-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-38706}, school = {Universit{\"a}t Potsdam}, pages = {ix, 162}, year = {2009}, abstract = {This thesis considers on the one hand the construction of point processes via conditional intensities, motivated by the partial integration of the Campbell measure of a point process. Under certain assumptions on the intensity the existence of such a point process is shown. A fundamental example turns out to be the P{\´o}lya sum process, whose conditional intensity is a generalisation of the P{\´o}lya urn dynamics. A Cox process representation for that point process is shown. A further process considered is a Poisson process of Gaussian loops, which represents a noninteracting particle system derived from the discussion of indistinguishable particles. Both processes are used to define particle systems locally, for which thermodynamic limits are determined.}, language = {en} } @phdthesis{Dyachenko2014, author = {Dyachenko, Evgeniya}, title = {Elliptic problems with small parameter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72056}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In this thesis we consider diverse aspects of existence and correctness of asymptotic solutions to elliptic differential and pseudodifferential equations. We begin our studies with the case of a general elliptic boundary value problem in partial derivatives. A small parameter enters the coefficients of the main equation as well as the boundary conditions. Such equations have already been investigated satisfactorily, but there still exist certain theoretical deficiencies. Our aim is to present the general theory of elliptic problems with a small parameter. For this purpose we examine in detail the case of a bounded domain with a smooth boundary. First of all, we construct formal solutions as power series in the small parameter. Then we examine their asymptotic properties. For this it suffices to establish sharp two-sided \emph{a priori} estimates for the operators of the boundary value problems which are uniform in the small parameter. Such estimates fail to hold in the function spaces used in classical elliptic theory. 
To circumvent this limitation we exploit norms depending on the small parameter for the functions defined on a bounded domain. Similar norms are widely used in the literature, but their properties have not been investigated extensively. Our theoretical investigation shows that the usual elliptic technique can be correctly carried out in these norms. The obtained results also allow one to extend the norms to compact manifolds with boundaries. We complete our investigation by formulating algebraic conditions on the operators and showing their equivalence to the existence of a priori estimates. In the second step, we extend the concept of ellipticity with a small parameter to more general classes of operators. Firstly, we want to compare the difference in asymptotic patterns between the obtained series and expansions for similar differential problems. Therefore we investigate the heat equation in a bounded domain with a small parameter near the time derivative. In this case the characteristics touch the boundary at a finite number of points. It is known in advance that the solutions are not regular in a neighbourhood of such points. We suppose moreover that the boundary at such points can be non-smooth but have cuspidal singularities. We find a formal asymptotic expansion and show that when a set of parameters passes through a threshold value, the expansions fail to be asymptotic. The last part of the work is devoted to the general concept of ellipticity with a small parameter. Several theoretical extensions to pseudodifferential operators have already been suggested in previous studies. As a new contribution we employ the analysis on manifolds with edge singularities, which allows us to consider wider classes of perturbed elliptic operators. We show that the introduced classes possess a priori estimates of elliptic type. As a further application we demonstrate how the developed tools can be used to reduce singularly perturbed problems to regular ones.}, language = {en} } @phdthesis{Jakobs2019, author = {Jakobs, Friedrich}, title = {Dubrovin-rings and their connection to Hughes-free skew fields of fractions}, doi = {10.25932/publishup-43556}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435561}, school = {Universit{\"a}t Potsdam}, pages = {ix, 62}, year = {2019}, abstract = {One method of embedding groups into skew fields was introduced by A. I. Mal'tsev and B. H. Neumann (cf. [18, 19]). If G is an ordered group and F is a skew field, the set F((G)) of formal power series over F in G with well-ordered support forms a skew field into which the group ring F[G] can be embedded. Unfortunately, it is not sufficient that G is left-ordered since F((G)) is only an F-vector space in this case as there is no natural way to define a multiplication on F((G)). One way to extend the original idea to left-ordered groups is to examine the endomorphism ring of F((G)) as explored by N. I. Dubrovin (cf. [5, 6]). It is possible to embed any crossed product ring F[G; η, σ] into the endomorphism ring of F((G)) such that each non-zero element of F[G; η, σ] defines an automorphism of F((G)) (cf. [5, 10]). Thus, the rational closure of F[G; η, σ] in the endomorphism ring of F((G)), which we will call the Dubrovin-ring of F[G; η, σ], is a potential candidate for a skew field of fractions of F[G; η, σ]. The methods of N. I. Dubrovin made it possible to show that specific classes of groups can be embedded into a skew field. For example, N. I. 
Dubrovin contrived some special criteria, which are applicable to the universal covering group of SL(2, R). These methods have also been explored by J. Gr{\"a}ter and R. P. Sperner (cf. [10]) as well as N. H. Halimi and T. Ito (cf. [11]). Furthermore, it is of interest to know if skew fields of fractions are unique. For example, left and right Ore domains have unique skew fields of fractions (cf. [2]). This does not hold in general: for example, the free group with 2 generators can be embedded into non-isomorphic skew fields of fractions (cf. [12]). It seems likely that Ore domains are the most general case for which unique skew fields of fractions exist. One approach to gain uniqueness is to restrict the search to skew fields of fractions with additional properties. I. Hughes has defined skew fields of fractions of crossed product rings F[G; η, σ] with locally indicable G which fulfill a special condition. These are called Hughes-free skew fields of fractions and I. Hughes has proven that they are unique if they exist [13, 14]. This thesis will connect the ideas of N. I. Dubrovin and I. Hughes. The first chapter contains the basic terminology and concepts used in this thesis. We present methods provided by N. I. Dubrovin such as the complexity of elements in rational closures and special properties of endomorphisms of the vector space of formal power series F((G)). To combine the ideas of N. I. Dubrovin and I. Hughes we introduce Conradian left-ordered groups of maximal rank and examine their connection to locally indicable groups. Furthermore we provide notations for crossed product rings, skew fields of fractions as well as Dubrovin-rings and prove some technical statements which are used in later parts. The second chapter focuses on Hughes-free skew fields of fractions and their connection to Dubrovin-rings. For that purpose we introduce series representations to interpret elements of Hughes-free skew fields of fractions as skew formal Laurent series. This allows us to prove that for Conradian left-ordered groups G of maximal rank the statement "F[G; η, σ] has a Hughes-free skew field of fractions" implies "The Dubrovin ring of F[G; η, σ] is a skew field". We will also prove the reverse and apply the results to give a new proof of Theorem 1 in [13]. Furthermore we will show how to extend injective ring homomorphisms of some crossed product rings to their Hughes-free skew fields of fractions. Finally, we will be able to answer the open question whether Hughes-free skew fields are strongly Hughes-free (cf. [17, page 53]).}, language = {en} } @phdthesis{Muecke2017, author = {M{\"u}cke, Nicole}, title = {Direct and inverse problems in machine learning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403479}, school = {Universit{\"a}t Potsdam}, pages = {159}, year = {2017}, abstract = {We analyze an inverse noisy regression model under random design with the aim of estimating the unknown target function based on a given set of data, drawn according to some unknown probability distribution. Our estimators are all constructed by kernel methods, which depend on a Reproducing Kernel Hilbert Space structure using spectral regularization methods. A first main result establishes upper and lower bounds for the rate of convergence under a given source condition assumption, restricting the class of admissible distributions. 
But since kernel methods scale poorly when massive datasets are involved, we study in more detail one example of saving computation time and memory requirements. We show that parallelizing spectral algorithms also leads to minimax optimal rates of convergence, provided the number of machines is chosen appropriately. We emphasize that so far all estimators depend on the assumed a priori smoothness of the target function and on the eigenvalue decay of the kernel covariance operator, which are in general unknown. Obtaining good, purely data-driven estimators constitutes the problem of adaptivity, which we handle for the single-machine problem via a version of the Lepskii principle.}, language = {en} } @phdthesis{Berner2016, author = {Berner, Nadine}, title = {Deciphering multiple changes in complex climate time series using Bayesian inference}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100065}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 135}, year = {2016}, abstract = {Change points in time series are perceived as heterogeneities in the statistical or dynamical characteristics of the observations. Unraveling such transitions yields essential information for the understanding of the observed system's intrinsic evolution and potential external influences. A precise detection of multiple changes is therefore of great importance for various research disciplines, such as environmental sciences, bioinformatics and economics. The primary purpose of the detection approach introduced in this thesis is the investigation of transitions underlying direct or indirect climate observations. In order to develop a diagnostic approach capable of capturing such a variety of natural processes, the generic statistical features in terms of central tendency and dispersion are employed in the light of Bayesian inversion. In contrast to established Bayesian approaches to multiple changes, the generic approach proposed in this thesis is not formulated in the framework of specialized partition models of high dimensionality requiring prior specification, but as a robust kernel-based approach of low dimensionality employing least informative prior distributions. First of all, a local Bayesian inversion approach is developed to robustly infer the location and the generic patterns of a single transition. The analysis of synthetic time series comprising changes of different observational evidence, data loss and outliers validates the performance, consistency and sensitivity of the inference algorithm. To systematically investigate time series for multiple changes, the Bayesian inversion is extended to a kernel-based inference approach. By introducing basic kernel measures, the weighted kernel inference results are composed into a proxy probability to a posterior distribution of multiple transitions. The detection approach is applied to environmental time series from the Nile river in Aswan and the weather station Tuscaloosa, Alabama, comprising documented changes. The method's performance confirms the approach as a powerful diagnostic tool to decipher multiple changes underlying direct climate observations. Finally, the kernel-based Bayesian inference approach is used to investigate a set of complex terrigenous dust records interpreted as climate indicators of the African region of the Plio-Pleistocene period. A detailed inference unravels multiple transitions underlying the indirect climate observations, which are interpreted as conjoint changes. 
The identified conjoint changes coincide with established global climate events. In particular, the two-step transition associated with the establishment of the modern Walker circulation contributes to the current discussion about the influence of paleoclimate changes on the environmental conditions in tropical and subtropical Africa around two million years ago.}, language = {en} } @phdthesis{Rothe2020, author = {Rothe, Viktoria}, title = {Das Yamabe-Problem auf global-hyperbolischen Lorentz-Mannigfaltigkeiten}, doi = {10.25932/publishup-48601}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-486012}, school = {Universit{\"a}t Potsdam}, pages = {ix, 65}, year = {2020}, abstract = {Im Jahre 1960 behauptete Yamabe folgende Aussage bewiesen zu haben: Auf jeder kompakten Riemannschen Mannigfaltigkeit (M,g) der Dimension n ≥ 3 existiert eine zu g konform {\"a}quivalente Metrik mit konstanter Skalarkr{\"u}mmung. Diese Aussage ist {\"a}quivalent zur Existenz einer L{\"o}sung einer bestimmten semilinearen elliptischen Differentialgleichung, der Yamabe-Gleichung. 1968 fand Trudinger einen Fehler in seinem Beweis und infolgedessen besch{\"a}ftigten sich viele Mathematiker mit diesem nach Yamabe benannten Yamabe-Problem. In den 80er Jahren konnte durch die Arbeiten von Trudinger, Aubin und Schoen gezeigt werden, dass diese Aussage tats{\"a}chlich zutrifft. Dadurch ergeben sich viele Vorteile, z.B. kann beim Analysieren von konform invarianten partiellen Differentialgleichungen auf kompakten Riemannschen Mannigfaltigkeiten die Skalarkr{\"u}mmung als konstant vorausgesetzt werden. Es stellt sich nun die Frage, ob die entsprechende Aussage auch auf Lorentz-Mannigfaltigkeiten gilt. Das Lorentz'sche Yamabe Problem lautet somit: Existiert zu einer gegebenen r{\"a}umlich kompakten global-hyperbolischen Lorentz-Mannigfaltigkeit (M,g) eine zu g konform {\"a}quivalente Metrik mit konstanter Skalarkr{\"u}mmung? Das Ziel dieser Arbeit ist es, dieses Problem zu untersuchen. Bei der sich aus dieser Fragestellung ergebenden Yamabe-Gleichung handelt es sich um eine semilineare Wellengleichung, deren L{\"o}sung eine positive glatte Funktion ist und aus der sich der konforme Faktor ergibt. Um die f{\"u}r die Behandlung des Yamabe-Problems ben{\"o}tigten Grundlagen so allgemein wie m{\"o}glich zu halten, wird im ersten Teil dieser Arbeit die lokale Existenztheorie f{\"u}r beliebige semilineare Wellengleichungen f{\"u}r Schnitte auf Vektorb{\"u}ndeln im Rahmen eines Cauchy-Problems entwickelt. Hierzu wird der Umkehrsatz f{\"u}r Banachr{\"a}ume angewendet, um mithilfe von bereits existierenden Existenzergebnissen zu linearen Wellengleichungen, Existenzaussagen zu semilinearen Wellengleichungen machen zu k{\"o}nnen. Es wird bewiesen, dass, falls die Nichtlinearit{\"a}t bestimmte Bedingungen erf{\"u}llt, eine fast zeitglobale L{\"o}sung des Cauchy-Problems f{\"u}r kleine Anfangsdaten sowie eine zeitlokale L{\"o}sung f{\"u}r beliebige Anfangsdaten existiert. Der zweite Teil der Arbeit befasst sich mit der Yamabe-Gleichung auf global-hyperbolischen Lorentz-Mannigfaltigkeiten. Zuerst wird gezeigt, dass die Nichtlinearit{\"a}t der Yamabe-Gleichung die geforderten Bedingungen aus dem ersten Teil erf{\"u}llt, so dass, falls die Skalarkr{\"u}mmung der gegebenen Metrik nahe an einer Konstanten liegt, kleine Anfangsdaten existieren, so dass die Yamabe-Gleichung eine fast zeitglobale L{\"o}sung besitzt. 
Mithilfe von Energieabsch{\"a}tzungen wird anschließend f{\"u}r 4-dimensionale global-hyperbolische Lorentz-Mannigfaltigkeiten gezeigt, dass unter der Annahme, dass die konstante Skalarkr{\"u}mmung der konform {\"a}quivalenten Metrik nichtpositiv ist, eine zeitglobale L{\"o}sung der Yamabe-Gleichung existiert, die allerdings nicht notwendigerweise positiv ist. Außerdem wird gezeigt, dass, falls die H2-Norm der Skalarkr{\"u}mmung bez{\"u}glich der gegebenen Metrik auf einem kompakten Zeitintervall auf eine bestimmte Weise beschr{\"a}nkt ist, die L{\"o}sung positiv auf diesem Zeitintervall ist. Hierbei wird ebenfalls angenommen, dass die konstante Skalarkr{\"u}mmung der konform {\"a}quivalenten Metrik nichtpositiv ist. Falls zus{\"a}tzlich hierzu gilt, dass die Skalarkr{\"u}mmung bez{\"u}glich der gegebenen Metrik negativ ist und die Metrik gewisse Bedingungen erf{\"u}llt, dann ist die L{\"o}sung f{\"u}r alle Zeiten in einem kompakten Zeitintervall positiv, auf dem der Gradient der Skalarkr{\"u}mmung auf eine bestimmte Weise beschr{\"a}nkt ist. In beiden F{\"a}llen folgt unter den angef{\"u}hrten Bedingungen die Existenz einer zeitglobalen positiven L{\"o}sung, falls M = I x Σ f{\"u}r ein beschr{\"a}nktes offenes Intervall I ist. Zum Schluss wird f{\"u}r M = R x Σ ein Beispiel f{\"u}r die Nichtexistenz einer globalen positiven L{\"o}sung angef{\"u}hrt.}, language = {de} } @phdthesis{Kaganova2015, author = {Kaganova, Ekaterina}, title = {Das Lehrpotential von Schulbuchlehrtexten im Fach Mathematik}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80705}, school = {Universit{\"a}t Potsdam}, pages = {287}, year = {2015}, abstract = {Das Schulbuch ist ein etablierter und bedeutender Bestandteil des Mathematikunterrichts. Lehrer nutzen es, um ihren Unterricht vorzubereiten und/oder zu gestalten; Sch{\"u}ler, um in selbigem zu lernen und zu bestehen, vielleicht sogar aus eigenem Interesse; Eltern, um sich dar{\"u}ber zu informieren, was ihr Kind eigentlich k{\"o}nnen soll und wie sie ihm gegebenenfalls helfen k{\"o}nnen. Dar{\"u}ber hinaus ist das Schulbuch ein markantes gesellschaftliches Produkt, dessen Zweck es ist, das Unterrichtsgeschehen zu steuern und zu beeinflussen. Damit ist es auch ein Anzeiger daf{\"u}r, was und wie im Mathematikunterricht gelehrt werden sollte und wird. Die Lehrtexte als zentrale Bestandteile von Schulb{\"u}chern verweisen in diesem Zusammenhang insbesondere auf die Phasen der Einf{\"u}hrung neuen Lernstoffs. Daraus legitimiert sich {\"u}bergreifend die Fragestellung, was und wie (gut) Mathematikschulbuchlehrtexte lehren bzw. was und wie (gut) adressierte Sch{\"u}ler aus ihnen (selbstst{\"a}ndig) lernen, d.h. Wissen erwerben k{\"o}nnen. Angesichts der komplexen und vielf{\"a}ltigen Bedeutung von Schulbuchlehrtexten verwundert es, dass die mathematikdidaktische Forschung bislang wenig Interesse an ihnen zeigt: Es fehlen sowohl eine theoretische Konzeption der Gr{\"o}ße ‚Lehrpotential eines schulmathematischen Lehrtextes' als auch ein analytisches Verfahren, um das anhand eines Mathematikschulbuchlehrtextes Verstehbare und Lernbare zu ermitteln. Mit der vorliegenden Arbeit wird sowohl in theoretisch-methodologischer als auch in empirischer Hinsicht der Versuch unternommen, diesen Defiziten zu begegnen. Dabei wird das ‚Lehrpotential eines Mathematikschulbuchlehrtextes' auf der Grundlage der kognitionspsychologischen Schematheorie und unter Einbeziehung textlinguistischer Ans{\"a}tze als eine textimmanente und analytisch zug{\"a}ngliche Gr{\"o}ße konzipiert. 
Anschließend wird das Lehrpotential von f{\"u}nf Lehrtexten ausgew{\"a}hlter aktueller Schulb{\"u}cher der Jahrgangsstufen 6 und 7 zu den Inhaltsbereichen ‚Br{\"u}che' und ‚lineare Funktionen' analysiert. Es zeigt sich, dass die untersuchten Lehrtexte aus deutschen Schulb{\"u}chern f{\"u}r Sch{\"u}ler sehr schwer verst{\"a}ndlich sind, d.h. es ist kompliziert, einigen Teiltexten im Rahmen des Gesamttextes einen Sinn abzugewinnen. Die Lehrtexte sind insbesondere dann kaum sinnhaft lesbar, wenn ein Sch{\"u}ler versucht, die mitgeteilten Sachverhalte zu verstehen, d.h. Antworten auf die Fragen zu erhalten, warum ein mathematischer Sachverhalt gerade so und nicht anders ist, wozu ein neuer Sachverhalt/Begriff gebraucht wird, wie das Neue mit bereits Bekanntem zusammenh{\"a}ngt usw. Deutlich zug{\"a}nglicher und sinnhafter erscheinen die Mathematikschulbuchlehrtexte hingegen unter der Annahme, dass ihre zentrale Botschaft in der Mitteilung besteht, welche Aufgabenstellungen in der jeweiligen Lehreinheit vorkommen und wie man sie bearbeitet. Demnach k{\"o}nnen Sch{\"u}ler anhand dieser Lehrtexte im Wesentlichen lernen, wie sie mit mathematischen Zeichen, die f{\"u}r sie kaum etwas bezeichnen, umgehen sollen. Die hier vorgelegten Analyseergebnisse gewinnen in einem soziologischen Kontext an Tragweite und Brisanz. So l{\"a}sst sich aus ihnen u.a. die These ableiten, dass die analysierten Lehrtexte keine ‚ungl{\"u}cklichen' Einzelf{\"a}lle sind, sondern dass die ‚Aufgabenorientierung in einem mathematischen Gewand' ein Charakteristikum typischer (deutscher) Mathematikschulbuchlehrtexte und - noch grunds{\"a}tzlicher - einen Wesenszug typischer schulmathematischer Kommunikation darstellt.}, language = {de} } @phdthesis{Schanner2022, author = {Schanner, Maximilian Arthus}, title = {Correlation based modeling of the archeomagnetic field}, doi = {10.25932/publishup-55587}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555875}, school = {Universit{\"a}t Potsdam}, pages = {vii, 146}, year = {2022}, abstract = {The geomagnetic main field is vital for life on Earth, as it shields our habitat against the solar wind and cosmic rays. It is generated by the geodynamo in the Earth's outer core and has rich dynamics on various timescales. Global models of the field are used to study the interaction of the field and incoming charged particles, but also to infer core dynamics and to feed numerical simulations of the geodynamo. Modern satellite missions, such as the SWARM or the CHAMP mission, support high resolution reconstructions of the global field. From the 19th century on, a global network of magnetic observatories has been established. It has been growing ever since and global models can be constructed from the data it provides. Geomagnetic field models that extend further back in time rely on indirect observations of the field, i.e. thermoremanent records such as burnt clay or volcanic rocks and sediment records from lakes and seas. These indirect records come with (partly very large) uncertainties, introduced by the complex measurement methods and the dating procedure. Focusing on thermoremanent records only, the aim of this thesis is the development of a new modeling strategy for the global geomagnetic field during the Holocene, which takes the uncertainties into account and produces realistic estimates of the reliability of the model. 
This aim is approached by first considering snapshot models, in order to address the irregular spatial distribution of the records and the non-linear relation of the indirect observations to the field itself. In a Bayesian setting, a modeling algorithm based on Gaussian process regression is developed and applied to binned data. The modeling algorithm is then extended to the temporal domain and expanded to incorporate dating uncertainties. Finally, the algorithm is sequentialized to deal with numerical challenges arising from the size of the Holocene dataset. The central result of this thesis, including all of the aspects mentioned, is a new global geomagnetic field model. It covers the whole Holocene, back to 12000 BCE, and we call it ArchKalmag14k. When considering the uncertainties that are produced together with the model, it is evident that before 6000 BCE the thermoremanent database is not sufficient to support global models. For more recent times, ArchKalmag14k can be used to analyze features of the field while taking posterior uncertainties into account. The algorithm for generating ArchKalmag14k can be applied to different datasets and is provided to the community as an open-source Python package.}, language = {en} } @phdthesis{Mauerberger2022, author = {Mauerberger, Stefan}, title = {Correlation based Bayesian modeling}, doi = {10.25932/publishup-53782}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-537827}, school = {Universit{\"a}t Potsdam}, pages = {x, 128}, year = {2022}, abstract = {The motivation for this work was the question of reliability and robustness of seismic tomography. The problem is that many earth models exist which can describe the underlying ground motion records equally well. Most algorithms for reconstructing earth models provide a solution, but rarely quantify their variability. If there is no way to verify the imaged structures, an interpretation is hardly reliable. The initial idea was to explore the space of equivalent earth models using Bayesian inference. However, it quickly became apparent that the rigorous quantification of tomographic uncertainties could not be accomplished within the scope of a dissertation. In order to maintain the fundamental concept of statistical inference, less complex problems from the geosciences are treated instead. This dissertation aims to anchor Bayesian inference more deeply in the geosciences and to transfer knowledge from applied mathematics. The underlying idea is to use well-known methods and techniques from statistics to quantify the uncertainties of inverse problems in the geosciences. This work is divided into three parts: Part I introduces the necessary mathematics and should be understood as a kind of toolbox. With a physical application in mind, this section provides a compact summary of all methods and techniques used. An introduction to Bayesian inference comes first. Then, as a special case, the focus is on regression with Gaussian processes under linear transformations. The chapters on the derivation of covariance functions and on the approximation of non-linearities go into more detail. Part II presents two proof of concept studies in the field of seismology. The aim is to present the conceptual application of the introduced methods and techniques with moderate complexity. The example about traveltime tomography applies the approximation of non-linear relationships. 
The derivation of a covariance function using the wave equation is shown in the example of a damped vibrating string. With these two synthetic applications, a consistent concept for the quantification of modeling uncertainties has been developed. Part III presents the reconstruction of the Earth's archeomagnetic field. This application uses the whole toolbox presented in Part I and is correspondingly complex. The modeling of the past 1000 years is based on real data and reliably quantifies the spatial modeling uncertainties. The statistical model presented is widely used and is under active development. The three applications mentioned are intentionally kept flexible to allow transferability to similar problems. The entire work focuses on the non-uniqueness of inverse problems in the geosciences. It is intended to be of relevance to those interested in the concepts of Bayesian inference.}, language = {en} } @phdthesis{Penisson2010, author = {P{\´e}nisson, Sophie}, title = {Conditional limit theorems for multitype branching processes and illustration in epidemiological risk analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45307}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis is concerned with the issue of extinction of populations composed of different types of individuals, and their behavior before extinction and in case of a very late extinction. We approach this question firstly from a strictly probabilistic viewpoint, and secondly from the standpoint of risk analysis related to the extinction of a particular model of population dynamics. In this context we propose several statistical tools. The population size is modeled by a branching process, which is either a continuous-time multitype Bienaym{\´e}-Galton-Watson process (BGWc), or its continuous-state counterpart, the multitype Feller diffusion process. We are interested in different kinds of conditioning on non-extinction, and in the associated equilibrium states. These ways of conditioning have been widely studied in the monotype case. However the literature on multitype processes is much less extensive, and there is no systematic work establishing connections between the results for BGWc processes and those for Feller diffusion processes. In the first part of this thesis, we investigate the behavior of the population before its extinction by conditioning the associated branching process X_t on non-extinction (X_t≠0), or more generally on non-extinction in a near future 0≤θ<∞ (X_{t+θ}≠0), and by letting t tend to infinity. We prove the result, new in the multitype framework and for θ>0, that this limit exists and is non-degenerate. This reflects a stationary behavior for the dynamics of the population conditioned on non-extinction, and provides a generalization of the so-called Yaglom limit, corresponding to the case θ=0. In a second step we study the behavior of the population in case of a very late extinction, obtained as the limit when θ tends to infinity of the process conditioned by X_{t+θ}≠0. The resulting conditioned process is a known object in the monotype case (sometimes referred to as Q-process), and has also been studied when X_t is a multitype Feller diffusion process. We investigate the not yet considered case where X_t is a multitype BGWc process and prove the existence of the associated Q-process. In addition, we examine its properties, including the asymptotic ones, and propose several interpretations of the process. 
Finally, we are interested in interchanging the limits in t and θ, as well as in the not yet studied commutativity of these limits with respect to the high-density-type relationship between BGWc processes and Feller processes. We establish an original and exhaustive list of all possible exchanges of limits (long-time limit in t, increasing delay of extinction θ, diffusion limit). The second part of this work is devoted to the risk analysis related both to the extinction of a population and to its very late extinction. We consider a branching population model (arising notably in the epidemiological context) for which a parameter related to the first moments of the offspring distribution is unknown. We build several estimators adapted to different stages of evolution of the population (growth phase, decay phase, and decay phase when extinction is expected very late), and prove moreover their asymptotic properties (consistency, normality). In particular, we build a least squares estimator adapted to the Q-process, allowing a prediction of the population development in the case of a very late extinction. This would correspond to the best- or to the worst-case scenario, depending on whether the population is threatened or invasive. These tools enable us to study the extinction phase of the Bovine Spongiform Encephalopathy epidemic in Great Britain, for which we estimate the infection parameter corresponding to a possible source of horizontal infection persisting after the removal in 1988 of the major route of infection (meat and bone meal). This allows us to predict the evolution of the spread of the disease, including the year of extinction, the number of future cases and the number of infected animals. In particular, we produce a very fine analysis of the evolution of the epidemic in the unlikely event of a very late extinction.}, language = {en} } @phdthesis{Khalil2018, author = {Khalil, Sara}, title = {Boundary Value Problems on Manifolds with Singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419018}, school = {Universit{\"a}t Potsdam}, pages = {10, 160}, year = {2018}, abstract = {In this thesis, new quantizations are constructed for pseudo-differential boundary value problems (BVPs) on manifolds with edge. The shape of the operators comes from Boutet de Monvel's calculus, which exists on smooth manifolds with boundary. The singular case, here with edge and boundary, is much more complicated. The present approach simplifies the operator-valued symbolic structures by using suitable Mellin quantizations on infinite stretched model cones of wedges with boundary. The Mellin symbols themselves are, modulo smoothing ones, with asymptotics, holomorphic in the complex Mellin covariable. One of the main results is the construction of parametrices of elliptic elements in the corresponding operator algebra, including elliptic edge conditions.}, language = {en} } @phdthesis{Hannes2022, author = {Hannes, Sebastian}, title = {Boundary Value Problems for the Lorentzian Dirac Operator}, doi = {10.25932/publishup-54839}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-548391}, school = {Universit{\"a}t Potsdam}, pages = {67}, year = {2022}, abstract = {The index theorem for elliptic operators on a closed Riemannian manifold by Atiyah and Singer has many applications in analysis, geometry and topology, but it is not suitable for a generalization to a Lorentzian setting. 
In the case where a boundary is present, Atiyah, Patodi and Singer provide an index theorem for compact Riemannian manifolds by introducing non-local boundary conditions obtained via the spectral decomposition of an induced boundary operator, so-called APS boundary conditions. B{\"a}r and Strohmaier prove a Lorentzian version of this index theorem for the Dirac operator on a manifold with boundary by utilizing results from APS and the characterization of the spectral flow by Phillips. In their case the Lorentzian manifold is assumed to be globally hyperbolic and spatially compact, and the induced boundary operator is given by the Riemannian Dirac operator on a spacelike Cauchy hypersurface. Their results show that imposing APS boundary conditions for this boundary operator will yield a Fredholm operator with a smooth kernel and its index can be calculated by a formula similar to the Riemannian case. Back in the Riemannian setting, B{\"a}r and Ballmann provide an analysis of the most general kind of boundary conditions that can be imposed on a first order elliptic differential operator that will still yield regularity for solutions as well as the Fredholm property for the resulting operator. These boundary conditions can be thought of as deformations to the graph of a suitable operator mapping APS boundary conditions to their orthogonal complement. This thesis aims at applying the boundary conditions found by B{\"a}r and Ballmann to a Lorentzian setting to understand more general types of boundary conditions for the Dirac operator, preserving the Fredholm property as well as providing regularity results and relative index formulas for the resulting operators. As it turns out, there are some differences in applying these graph-type boundary conditions to the Lorentzian Dirac operator when compared to the Riemannian setting. It will be shown that in contrast to the Riemannian case, going from a Fredholm boundary condition to its orthogonal complement works out fine in the Lorentzian setting. On the other hand, in order to deduce the Fredholm property and regularity of solutions for graph-type boundary conditions, additional assumptions for the deformation maps need to be made. The thesis is organized as follows. In chapter 1 basic facts about Lorentzian and Riemannian spin manifolds, their spinor bundles and the Dirac operator are listed. These will serve as a foundation to define the setting and prove the results of later chapters. Chapter 2 defines the general notion of boundary conditions for the Dirac operator used in this thesis and introduces the APS boundary conditions as well as their graph-type deformations. Also the role of the wave evolution operator in finding Fredholm boundary conditions is analyzed and these boundary conditions are connected to the notion of Fredholm pairs in a given Hilbert space. Chapter 3 focuses on the principal symbol calculation of the wave evolution operator and the results are used to prove the Fredholm property as well as regularity of solutions for suitable graph-type boundary conditions. Also sufficient conditions are derived for (pseudo-)local boundary conditions imposed on the Dirac operator to yield a Fredholm operator with a smooth solution space. In the last chapter 4, a few examples of boundary conditions are calculated by applying the results of previous chapters. 
Restricting to special geometries and/or boundary conditions, results can be obtained that are not covered by the more general statements, and it is shown that so-called transmission conditions behave very differently than in the Riemannian setting.}, language = {en} } @phdthesis{MalemShinitski2023, author = {Malem-Shinitski, Noa}, title = {Bayesian inference and modeling for point processes with applications from neuronal activity to scene viewing}, doi = {10.25932/publishup-61495}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614952}, school = {Universit{\"a}t Potsdam}, pages = {vii, 129}, year = {2023}, abstract = {Point processes are a common methodology to model sets of events. From earthquakes to social media posts, from the arrival times of neuronal spikes to the timing of crimes, from stock prices to disease spreading -- these phenomena can be reduced to the occurrences of events concentrated in points. Often, these events happen one after the other, defining a time series. Models of point processes can be used to deepen our understanding of such events and for classification and prediction. Such models include an underlying random process that generates the events. This work uses Bayesian methodology to infer the underlying generative process from observed data. Our contribution is twofold -- we develop new models and new inference methods for these processes. We propose a model that extends the family of point processes where the occurrence of an event depends on the previous events. This family is known as Hawkes processes. Whereas in most existing models of such processes, past events are assumed to have only an excitatory effect on future events, we focus on the newly developed nonlinear Hawkes process, where past events could have excitatory and inhibitory effects. After defining the model, we present its inference method and apply it to data from different fields, among others, to neuronal activity. The second model described in the thesis concerns a specific instance of point processes -- the decision process underlying human gaze control. This process results in a series of fixated locations in an image. We developed a new model to describe this process, motivated by the well-known Exploration--Exploitation dilemma. Alongside the model, we present a Bayesian inference algorithm to infer the model parameters. Remaining in the realm of human scene viewing, we identify the lack of best practices for Bayesian inference in this field. We survey four popular algorithms and compare their performances for parameter inference in two scan path models. The novel models and inference algorithms presented in this dissertation enrich the understanding of point process data and allow us to uncover meaningful insights.}, language = {en} } @phdthesis{Maier2021, author = {Maier, Corinna}, title = {Bayesian data assimilation and reinforcement learning for model-informed precision dosing in oncology}, doi = {10.25932/publishup-51587}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-515870}, school = {Universit{\"a}t Potsdam}, pages = {x, 138}, year = {2021}, abstract = {While patients are known to respond differently to drug therapies, current clinical practice often still follows a standardized dosage regimen for all patients. For drugs with a narrow range of both effective and safe concentrations, this approach may lead to a high incidence of adverse events or subtherapeutic dosing in the presence of high patient variability. 
Model-informed precision dosing (MIPD) is a quantitative approach towards dose individualization based on mathematical modeling of dose-response relationships integrating therapeutic drug/biomarker monitoring (TDM) data. MIPD may considerably improve the efficacy and safety of many drug therapies. Current MIPD approaches, however, rely either on pre-calculated dosing tables or on simple point predictions of the therapy outcome. These approaches lack a quantification of uncertainties and the ability to account for effects that are delayed. In addition, the underlying models are not improved while applied to patient data. Therefore, current approaches are not well suited for informed clinical decision-making based on a differentiated understanding of the individually predicted therapy outcome. The objective of this thesis is to develop mathematical approaches for MIPD, which (i) provide efficient fully Bayesian forecasting of the individual therapy outcome including associated uncertainties, (ii) integrate Markov decision processes via reinforcement learning (RL) for a comprehensive decision framework for dose individualization, (iii) allow for continuous learning across patients and hospitals. Cytotoxic anticancer chemotherapy with its major dose-limiting toxicity, neutropenia, serves as a therapeutically relevant application example. For more comprehensive therapy forecasting, we apply Bayesian data assimilation (DA) approaches, integrating patient-specific TDM data into mathematical models of chemotherapy-induced neutropenia that build on prior population analyses. The value of uncertainty quantification is demonstrated as it allows reliable computation of the patient-specific probabilities of relevant clinical quantities, e.g., the neutropenia grade. In view of novel home monitoring devices that increase the amount of TDM data available, the data processing of sequential DA methods proves to be more efficient and facilitates handling of the variability between dosing events. By transferring concepts from DA and RL we develop novel approaches for MIPD. While DA-guided dosing integrates individualized uncertainties into dose selection, RL-guided dosing provides a framework to consider delayed effects of dose selections. The combined DA-RL approach takes into account both aspects simultaneously and thus represents a holistic approach towards MIPD. Additionally, we show that RL can be used to gain insights into important patient characteristics for dose selection. The novel dosing strategies substantially reduce the occurrence of both subtherapeutic and life-threatening neutropenia grades in a simulation study based on a recent clinical study (CEPAC-TDM trial) compared to currently used MIPD approaches. If MIPD is to be implemented in routine clinical practice, a certain model bias with respect to the underlying model is inevitable, as the models are typically based on data from comparably small clinical trials that reflect only to a limited extent the diversity in real-world patient populations. We propose a sequential hierarchical Bayesian inference framework that enables continuous cross-patient learning to learn the underlying model parameters of the target patient population. It is important to note that the approach only requires summary information of the individual patient data to update the model. This separation of the individual inference from population inference enables implementation across different centers of care. 
The proposed approaches substantially improve current MIPD approaches, taking into account new trends in health care and aspects of practical applicability. They enable progress towards more informed clinical decision-making, ultimately increasing patient benefits beyond the current practice.}, language = {en} } @phdthesis{Rosenberger2006, author = {Rosenberger, Elke}, title = {Asymptotic spectral analysis and tunnelling for a class of difference operators}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7393}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {We analyze the asymptotic behavior in the limit epsilon to zero for a wide class of difference operators H_epsilon = T_epsilon + V_epsilon with underlying multi-well potential. They act on the square summable functions on the lattice (epsilon Z)^d. We start by showing the validity of a harmonic approximation and construct WKB-solutions at the wells. Then we construct a Finslerian distance d induced by H and show that short integral curves are geodesics and d gives the rate for the exponential decay of Dirichlet eigenfunctions. In terms of this distance, we give sharp estimates for the interaction between the wells and construct the interaction matrix.}, subject = {Mathematische Physik}, language = {en} } @phdthesis{Trappmann2007, author = {Trappmann, Henryk}, title = {Arborescent numbers : higher arithmetic operations and division trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15247}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {The overall program "arborescent numbers" is to similarly perform the constructions from the natural numbers (N) to the positive fractional numbers (Q+) to positive real numbers (R+) beginning with (specific) binary trees instead of natural numbers. N can be regarded as the associative binary trees. The binary trees B and the left-commutative binary trees P allow the hassle-free definition of arbitrarily high arithmetic operations (hyper ... hyperpowers). To construct the division trees the algebraic structure "coppice" is introduced which is a group with an addition over which the multiplication is right-distributive. Q+ is the initial associative coppice. The present work accomplishes one step in the program "arborescent numbers". That is the construction of the arborescent equivalent(s) of the positive fractional numbers. These equivalents are the "division binary trees" and the "fractional trees". A representation with decidable word problem for each of them is given. The set of functions f:R1->R1 generated from identity by taking powers is isomorphic to P and can be embedded into a coppice by taking inverses.}, language = {en} } @phdthesis{Le2006, author = {Le, Tuan Anh}, title = {Applying realistic mathematics education in Vietnam : teaching middle school geometry}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13480}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Since 1971, the Freudenthal Institute has developed an approach to mathematics education named Realistic Mathematics Education (RME). The philosophy of RME is based on Hans Freudenthal's concept of 'mathematics as a human activity'. Prof. Hans Freudenthal (1905-1990), a mathematician and educator, believes that 'ready-made mathematics' should not be taught in school. By contrast, he urges that students should be offered 'realistic situations' so that they can rediscover mathematics, moving from informal to formal mathematics. 
Although mathematics education in Vietnam has some achievements, it still encounters several challenges. Recently, the reform of teaching methods has become an urgent task in Vietnam. It appears that Vietnamese mathematics education lacks necessary theoretical frameworks. At first sight, the philosophy of RME is suitable for the orientation of the teaching method reform in Vietnam. However, the potential of RME for mathematics education as well as the feasibility of applying RME to teaching mathematics is still questionable in Vietnam. The primary aim of this dissertation is to investigate the possibilities of applying RME to teaching and learning mathematics in Vietnam and to answer the question "how could RME enrich Vietnamese mathematics education?". This research will emphasize teaching geometry in Vietnamese middle schools. More specifically, the dissertation will implement the following research tasks: • Analyzing the characteristics of Vietnamese mathematics education in the 'reformed' period (from the early 1980s to the early 2000s) and at present; • Implementing a survey of 152 middle school teachers' ideas from several Vietnamese provinces and cities about Vietnamese mathematics education; • Analyzing RME, including Freudenthal's viewpoints for RME and the characteristics of RME; • Discussing how to design RME-based lessons and how to apply these lessons to teaching and learning in Vietnam; • Experimenting with RME-based lessons in a Vietnamese middle school; • Analyzing the feedback from the students' worksheets and the teachers' reports, including the potentials of RME-based lessons for Vietnamese middle school and the difficulties the teachers and their students encountered with RME-based lessons; • Discussing proposals for applying RME-based lessons to teaching and learning mathematics in Vietnam, including making suggestions for teachers who will apply these lessons to their teaching and designing courses for in-service teachers and teachers in training. This research reveals that although teachers and students may encounter some obstacles while teaching and learning with RME-based lessons, RME could become a potential approach for mathematics education and could be effectively applied to teaching and learning mathematics in Vietnamese schools.}, language = {en} } @phdthesis{Abed2010, author = {Abed, Jamil}, title = {An iterative approach to operators on manifolds with singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44757}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {We establish elements of a new approach to ellipticity and parametrices within operator algebras on manifolds with higher singularities, only based on some general axiomatic requirements on parameter-dependent operators in suitable scales of spaces. The idea is to model an iterative process with new generations of parameter-dependent operator theories, together with new scales of spaces that satisfy analogous requirements to the original ones, now on a corresponding higher level. The "full" calculus involves two separate theories, one near the tip of the corner and another one at the conical exit to infinity. 
However, concerning the conical exit to infinity, we establish here a new concrete calculus of edge-degenerate operators which can be iterated to higher singularities.}, language = {en} } @phdthesis{Zass2021, author = {Zass, Alexander}, title = {A multifaceted study of marked Gibbs point processes}, doi = {10.25932/publishup-51277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512775}, school = {Universit{\"a}t Potsdam}, pages = {vii, 104}, year = {2021}, abstract = {This thesis focuses on the study of marked Gibbs point processes, in particular presenting some results on their existence and uniqueness, with ideas and techniques drawn from different areas of statistical mechanics: the entropy method from large deviations theory, cluster expansion and the Kirkwood--Salsburg equations, the Dobrushin contraction principle and disagreement percolation. We first present an existence result for infinite-volume marked Gibbs point processes. More precisely, we use the so-called entropy method (and large-deviation tools) to construct marked Gibbs point processes in R^d under quite general assumptions. In particular, the random marks belong to a general normed space S and are not bounded. Moreover, we allow for interaction functionals that may be unbounded and whose range is finite but random. The entropy method relies on showing that a family of finite-volume Gibbs point processes belongs to sequentially compact entropy level sets, and is therefore tight. We then present infinite-dimensional Langevin diffusions, which we put in interaction via a Gibbsian description. In this setting, we are able to adapt the general result above to show the existence of the associated infinite-volume measure. We also study its correlation functions via cluster expansion techniques, and obtain the uniqueness of the Gibbs process for all inverse temperatures β and activities z below a certain threshold. This method relies on first showing that the correlation functions of the process satisfy a so-called Ruelle bound, and then using it to solve a fixed point problem in an appropriate Banach space. The uniqueness domain we obtain then consists of the model parameters z and β for which such a problem has exactly one solution. Finally, we explore further the question of uniqueness of infinite-volume Gibbs point processes on R^d, in the unmarked setting. We present, in the context of repulsive interactions with a hard-core component, a novel approach to uniqueness by applying the discrete Dobrushin criterion to the continuum framework. We first fix a discretisation parameter a>0 and then study the behaviour of the uniqueness domain as a goes to 0. With this technique we are able to obtain explicit thresholds for the parameters z and β, which we then compare to existing results coming from the different methods of cluster expansion and disagreement percolation. Throughout this thesis, we illustrate our theoretical results with various examples both from classical statistical mechanics and stochastic geometry.}, language = {en} }
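
As a small illustration of the kind of finite-volume Gibbs point process with repulsive hard-core interaction discussed in the last abstract, the following Python sketch implements a standard birth-death Metropolis-Hastings sampler for a hard-core Strauss process on the unit square, with an activity z and inverse temperature beta playing the roles of the parameters z and β mentioned there. The specific pair potential, the observation window and all parameter values are assumptions chosen purely for illustration; they are not taken from the thesis.

import numpy as np

rng = np.random.default_rng(0)

# Illustrative parameters (assumptions, not taken from the thesis):
z = 100.0      # activity
beta = 1.0     # inverse temperature
R = 0.08       # range of the Strauss-type repulsion
r_hc = 0.02    # hard-core distance (pairs closer than this are forbidden)
side = 1.0     # observation window W = [0, side]^2
area = side * side

def close_points(points, u, radius):
    """Number of points in `points` within `radius` of the location `u`."""
    if len(points) == 0:
        return 0
    d = np.linalg.norm(np.asarray(points) - u, axis=1)
    return int(np.sum(d < radius))

def birth_death_mh(n_steps=50_000):
    """Birth-death Metropolis-Hastings sampler for a hard-core Strauss process.

    Unnormalised density with respect to a unit-rate Poisson process on W:
        z**n(x) * exp(-beta * s_R(x)) * 1{no pair of points closer than r_hc},
    where s_R(x) is the number of point pairs at distance < R.
    """
    points = []
    for _ in range(n_steps):
        n = len(points)
        if rng.random() < 0.5:                      # propose a birth
            u = rng.random(2) * side
            if close_points(points, u, r_hc) > 0:   # hard-core violation: reject
                continue
            delta = close_points(points, u, R)      # new R-close pairs created
            ratio = z * area / (n + 1) * np.exp(-beta * delta)
            if rng.random() < min(1.0, ratio):
                points.append(u)
        elif n > 0:                                  # propose a death
            i = rng.integers(n)
            u = points[i]
            others = points[:i] + points[i + 1:]
            delta = close_points(others, u, R)      # R-close pairs removed
            ratio = n / (z * area) * np.exp(beta * delta)
            if rng.random() < min(1.0, ratio):
                points = others
    return np.array(points)

sample = birth_death_mh()
print(f"sampled configuration with {len(sample)} points")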