@phdthesis{Goswami2014, author = {Goswami, Bedartha}, title = {Uncertainties in climate data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Scientific inquiry requires that we formulate not only what we know, but also what we do not know and by how much. In climate data analysis, this involves an accurate specification of measured quantities and a consequent analysis that consciously propagates the measurement errors at each step. The dissertation presents a thorough analytical method to quantify errors of measurement inherent in paleoclimate data. An additional focus are the uncertainties in assessing the coupling between different factors that influence the global mean temperature (GMT). Paleoclimate studies critically rely on `proxy variables' that record climatic signals in natural archives. However, such proxy records inherently involve uncertainties in determining the age of the signal. We present a generic Bayesian approach to analytically determine the proxy record along with its associated uncertainty, resulting in a time-ordered sequence of correlated probability distributions rather than a precise time series. We further develop a recurrence based method to detect dynamical events from the proxy probability distributions. The methods are validated with synthetic examples and demonstrated with real-world proxy records. The proxy estimation step reveals the interrelations between proxy variability and uncertainty. The recurrence analysis of the East Asian Summer Monsoon during the last 9000 years confirms the well-known `dry' events at 8200 and 4400 BP, plus an additional significantly dry event at 6900 BP. We also analyze the network of dependencies surrounding GMT. We find an intricate, directed network with multiple links between the different factors at multiple time delays. 
We further uncover a significant feedback from the GMT to the El Ni{\~n}o Southern Oscillation at quasi-biennial timescales. The analysis highlights the need of a more nuanced formulation of influences between different climatic factors, as well as the limitations in trying to estimate such dependencies.}, language = {en} } @phdthesis{Bergner2011, author = {Bergner, Andr{\'e}}, title = {Synchronization in complex systems with multiple time scales}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53407}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present work synchronization phenomena in complex dynamical systems exhibiting multiple time scales have been analyzed. Multiple time scales can be active in different manners. Three different systems have been analyzed with different methods from data analysis. The first system studied is a large heterogeneous network of bursting neurons, that is a system with two predominant time scales, the fast firing of action potentials (spikes) and the burst of repetitive spikes followed by a quiescent phase. This system has been integrated numerically and analyzed with methods based on recurrence in phase space. An interesting result is the different transitions to synchrony found in the two distinct time scales. Moreover, an anomalous synchronization effect can be observed in the fast time scale, i.e. there is a range of the coupling strength where desynchronization occurs. The second system analyzed, numerically as well as experimentally, is a pair of coupled CO₂ lasers in a chaotic bursting regime. This system is interesting due to its similarity with epidemic models. We explain the bursts by different time scales generated from unstable periodic orbits embedded in the chaotic attractor and perform a synchronization analysis of these different orbits utilizing the continuous wavelet transform. We find a diverse route to synchrony of these different observed time scales. 
The last system studied is a small network motif of limit cycle oscillators. Precisely, we have studied a hub motif, which serves as elementary building block for scale-free networks, a type of network found in many real world applications. These hubs are of special importance for communication and information transfer in complex networks. Here, a detailed study on the mechanism of synchronization in oscillatory networks with a broad frequency distribution has been carried out. In particular, we find a remote synchronization of nodes in the network which are not directly coupled. We also explain the responsible mechanism and its limitations and constraints. Further we derive an analytic expression for it and show that information transmission in pure phase oscillators, such as the Kuramoto type, is limited. In addition to the numerical and analytic analysis an experiment consisting of electrical circuits has been designed. The obtained results confirm the former findings.}, language = {en} } @phdthesis{Thiel2004, author = {Thiel, Marco}, title = {Recurrences : exploiting naturally occurring analogues}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001633}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {In der vorliegenden Arbeit wird die Wiederkehr im Phasenraum ausgenutzt. Dabei werden drei Hauptresultate besprochen. 1. Die Wiederkehr erlaubt die Vorhersagbarkeit des Systems zu quantifizieren. 2. Die Wiederkehr enthaelt (unter bestimmten Voraussetzungen) s{\"a}mtliche relevante Information {\"u}ber die Dynamik im Phasenraum 3. Die Wiederkehr erlaubt die Erzeugung dynamischer Ersatzdaten.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 
28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results of this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. 
The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Rosenblum2003, author = {Rosenblum, Michael}, title = {Phase synchronization of chaotic systems : from theory to experimental applications}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000682}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In einem klassischen Kontext bedeutet Synchronisierung die Anpassung der Rhythmen von selbst-erregten periodischen Oszillatoren aufgrund ihrer schwachen Wechselwirkung. Der Begriff der Synchronisierung geht auf den ber{\"u}hmten niederl{\"a}ndischen Wissenschaftler Christiaan Huygens im 17. Jahrhundert zur{\"u}ck, der {\"u}ber seine Beobachtungen mit Pendeluhren berichtete. Wenn zwei solche Uhren auf der selben Unterlage plaziert wurden, schwangen ihre Pendel in perfekter {\"U}bereinstimmung. Mathematisch bedeutet das, daß infolge der Kopplung, die Uhren mit gleichen Frequenzen und engverwandten Phasen zu oszillieren begannen. Als wahrscheinlich {\"a}ltester beobachteter nichtlinearer Effekt wurde die Synchronisierung erst nach den Arbeiten von E. V. Appleton und B. Van der Pol gegen 1920 verstanden, die die Synchronisierung in Triodengeneratoren systematisch untersucht haben. Seitdem wurde die Theorie gut entwickelt, und hat viele Anwendungen gefunden. Heutzutage weiss man, dass bestimmte, sogar ziemlich einfache, Systeme, ein chaotisches Verhalten aus{\"u}ben k{\"o}nnen. Dies bedeutet, dass ihre Rhythmen unregelm{\"a}ßig sind und nicht durch nur eine einzige Frequenz charakterisiert werden k{\"o}nnen. Wie in der Habilitationsarbeit gezeigt wurde, kann man jedoch den Begriff der Phase und damit auch der Synchronisierung auf chaotische Systeme ausweiten. 
Wegen ihrer sehr schwachen Wechselwirkung treten Beziehungen zwischen den Phasen und den gemittelten Frequenzen auf und f{\"u}hren damit zur {\"U}bereinstimmung der immer noch unregelm{\"a}ßigen Rhythmen. Dieser Effekt, sogenannter Phasensynchronisierung, konnte sp{\"a}ter in Laborexperimenten anderer wissenschaftlicher Gruppen best{\"a}tigt werden. Das Verst{\"a}ndnis der Synchronisierung unregelm{\"a}ßiger Oszillatoren erlaubte es uns, wichtige Probleme der Datenanalyse zu untersuchen. Ein Hauptbeispiel ist das Problem der Identifikation schwacher Wechselwirkungen zwischen Systemen, die nur eine passive Messung erlauben. Diese Situation trifft h{\"a}ufig in lebenden Systemen auf, wo Synchronisierungsph{\"a}nomene auf jedem Niveau erscheinen - auf der Ebene von Zellen bis hin zu makroskopischen physiologischen Systemen; in normalen Zust{\"a}nden und auch in Zust{\"a}nden ernster Pathologie. Mit unseren Methoden konnten wir eine Anpassung in den Rhythmen von Herz-Kreislauf und Atmungssystem in Menschen feststellen, wobei der Grad ihrer Interaktion mit der Reifung zunimmt. Weiterhin haben wir unsere Algorithmen benutzt, um die Gehirnaktivit{\"a}t von an Parkinson Erkrankten zu analysieren. Die Ergebnisse dieser Kollaboration mit Neurowissenschaftlern zeigen, dass sich verschiedene Gehirnbereiche genau vor Beginn des pathologischen Zitterns synchronisieren. Außerdem gelang es uns, die f{\"u}r das Zittern verantwortliche Gehirnregion zu lokalisieren.}, language = {en} } @misc{Dietrich2008, type = {Master Thesis}, author = {Dietrich, Jan Philipp}, title = {Phase Space Reconstruction using the frequency domain : a generalization of actual methods}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50738}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Phase Space Reconstruction is a method that allows to reconstruct the phase space of a system using only a one dimensional time series as input. 
It can be used for calculating Lyapunov-exponents and detecting chaos. It helps to understand complex dynamics and their behavior. And it can reproduce datasets which were not measured. There are many different methods which produce correct reconstructions such as time-delay, Hilbert-transformation, derivation and integration. The most used one is time-delay but all methods have special properties which are useful in different situations. Hence, every reconstruction method has some situations where it is the best choice. Looking at all these different methods the questions are: Why can all these different looking methods be used for the same purpose? Is there any connection between all these functions? The answer is found in the frequency domain : Performing a Fourier transformation, all these methods get a similar shape: Every presented reconstruction method can be described as a multiplication in the frequency domain with a frequency-dependent reconstruction function. This structure is also known as a filter. From this point of view every reconstructed dimension can be seen as a filtered version of the measured time series. It contains the original data but applies just a new focus: Some parts are amplified and other parts are reduced. Furthermore I show that not every function can be used for reconstruction. In the thesis three characteristics are identified, which are mandatory for the reconstruction function. Under consideration of these restrictions one gets a whole bunch of new reconstruction functions. 
So it is possible to reduce noise within the reconstruction process itself or to use some advantages of already known reconstructions methods while suppressing unwanted characteristics of it.}, language = {en} } @phdthesis{Zoeller2005, author = {Z{\"o}ller, Gert}, title = {Critical states of seismicity : modeling and data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7427}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The occurrence of earthquakes is characterized by a high degree of spatiotemporal complexity. Although numerous patterns, e.g. fore- and aftershock sequences, are well-known, the underlying mechanisms are not observable and thus not understood. Because the recurrence times of large earthquakes are usually decades or centuries, the number of such events in corresponding data sets is too small to draw conclusions with reasonable statistical significance. Therefore, the present study combines both, numerical modeling and analysis of real data in order to unveil the relationships between physical mechanisms and observational quantities. The key hypothesis is the validity of the so-called "critical point concept" for earthquakes, which assumes large earthquakes to occur as phase transitions in a spatially extended many-particle system, similar to percolation models. New concepts are developed to detect critical states in simulated and in natural data sets. The results indicate that important features of seismicity like the frequency-size distribution and the temporal clustering of earthquakes depend on frictional and structural fault parameters. In particular, the degree of quenched spatial disorder (the "roughness") of a fault zone determines whether large earthquakes occur quasiperiodically or more clustered. This illustrates the power of numerical models in order to identify regions in parameter space, which are relevant for natural seismicity. 
The critical point concept is verified for both, synthetic and natural seismicity, in terms of a critical state which precedes a large earthquake: a gradual roughening of the (unobservable) stress field leads to a scale-free (observable) frequency-size distribution. Furthermore, the growth of the spatial correlation length and the acceleration of the seismic energy release prior to large events is found. The predictive power of these precursors is, however, limited. Instead of forecasting time, location, and magnitude of individual events, a contribution to a broad multiparameter approach is encouraging.}, subject = {Seismizit{\"a}t}, language = {en} } @phdthesis{Balzer2014, author = {Balzer, Arnim}, title = {Crab flare observations with H.E.S.S. phase II}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72545}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The H.E.S.S. array is a third generation Imaging Atmospheric Cherenkov Telescope (IACT) array. It is located in the Khomas Highland in Namibia, and measures very high energy (VHE) gamma-rays. In Phase I, the array started data taking in 2004 with its four identical 13 m telescopes. Since then, H.E.S.S. has emerged as the most successful IACT experiment to date. Among the almost 150 sources of VHE gamma-ray radiation found so far, even the oldest detection, the Crab Nebula, keeps surprising the scientific community with unexplained phenomena such as the recently discovered very energetic flares of high energy gamma-ray radiation. During its most recent flare, which was detected by the Fermi satellite in March 2013, the Crab Nebula was simultaneously observed with the H.E.S.S. array for six nights. The results of the observations will be discussed in detail during the course of this work. During the nights of the flare, the new 24 m × 32 m H.E.S.S. II telescope was still being commissioned, but participated in the data taking for one night. 
To be able to reconstruct and analyze the data of the H.E.S.S. Phase II array, the algorithms and software used by the H.E.S.S. Phase I array had to be adapted. The most prominent advanced shower reconstruction technique developed by de Naurois and Rolland, the template-based model analysis, compares real shower images taken by the Cherenkov telescope cameras with shower templates obtained using a semi-analytical model. To find the best fitting image, and, therefore, the relevant parameters that describe the air shower best, a pixel-wise log-likelihood fit is done. The adaptation of this advanced shower reconstruction technique to the heterogeneous H.E.S.S. Phase II array for stereo events (i.e. air showers seen by at least two telescopes of any kind), its performance using MonteCarlo simulations as well as its application to real data will be described.}, language = {en} } @misc{Donges2009, type = {Master Thesis}, author = {Donges, Jonathan Friedemann}, title = {Complex networks in the climate system}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49775}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Complex network theory provides an elegant and powerful framework to statistically investigate the topology of local and long range dynamical interrelationships, i.e., teleconnections, in the climate system. Employing a refined methodology relying on linear and nonlinear measures of time series analysis, the intricate correlation structure within a multivariate climatological data set is cast into network form. Within this graph theoretical framework, vertices are identified with grid points taken from the data set representing a region on the the Earth's surface, and edges correspond to strong statistical interrelationships between the dynamics on pairs of grid points. 
The resulting climate networks are neither perfectly regular nor completely random, but display the intriguing and nontrivial characteristics of complexity commonly found in real world networks such as the internet, citation and acquaintance networks, food webs and cortical networks in the mammalian brain. Among other interesting properties, climate networks exhibit the "small-world" effect and possess a broad degree distribution with dominating super-nodes as well as a pronounced community structure. We have performed an extensive and detailed graph theoretical analysis of climate networks on the global topological scale focussing on the flow and centrality measure betweenness which is locally defined at each vertex, but includes global topological information by relying on the distribution of shortest paths between all pairs of vertices in the network. The betweenness centrality field reveals a rich internal structure in complex climate networks constructed from reanalysis and atmosphere-ocean coupled general circulation model (AOGCM) surface air temperature data. Our novel approach uncovers an elaborately woven meta-network of highly localized channels of strong dynamical information flow, that we relate to global surface ocean currents and dub the backbone of the climate network in analogy to the homonymous data highways of the internet. This finding points to a major role of the oceanic surface circulation in coupling and stabilizing the global temperature field in the long term mean (140 years for the model run and 60 years for reanalysis data). Carefully comparing the backbone structures detected in climate networks constructed using linear Pearson correlation and nonlinear mutual information, we argue that the high sensitivity of betweenness with respect to small changes in network structure may allow to detect the footprints of strongly nonlinear physical interactions in the climate system. 
The results presented in this thesis are thoroughly founded and substantiated using a hierarchy of statistical significance tests on the level of time series and networks, i.e., by tests based on time series surrogates as well as network surrogates. This is particularly relevant when working with real world data. Specifically, we developed new types of network surrogates to include the additional constraints imposed by the spatial embedding of vertices in a climate network. Our methodology is of potential interest for a broad audience within the physics community and various applied fields, because it is universal in the sense of being valid for any spatially extended dynamical system. It can help to understand the localized flow of dynamical information in any such system by combining multivariate time series analysis, a complex network approach and the information flow measure betweenness centrality. Possible fields of application include fluid dynamics (turbulence), plasma physics and biological physics (population models, neural networks, cell models). Furthermore, the climate network approach is equally relevant for experimental data as well as model simulations and hence introduces a novel perspective on model evaluation and data driven model building. Our work is timely in the context of the current debate on climate change within the scientific community, since it allows to assess from a new perspective the regional vulnerability and stability of the climate system while relying on global and not only on regional knowledge. 
The methodology developed in this thesis hence has the potential to substantially contribute to the understanding of the local effect of extreme events and tipping points in the earth system within a holistic global framework.}, language = {en} } @phdthesis{Ohme2012, author = {Ohme, Frank}, title = {Bridging the gap between post-Newtonian theory and numerical relativity in gravitational-wave data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60346}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One of the most exciting predictions of Einstein's theory of gravitation that have not yet been proven experimentally by a direct detection are gravitational waves. These are tiny distortions of the spacetime itself, and a world-wide effort to directly measure them for the first time with a network of large-scale laser interferometers is currently ongoing and expected to provide positive results within this decade. One potential source of measurable gravitational waves is the inspiral and merger of two compact objects, such as binary black holes. Successfully finding their signature in the noise-dominated data of the detectors crucially relies on accurate predictions of what we are looking for. In this thesis, we present a detailed study of how the most complete waveform templates can be constructed by combining the results from (A) analytical expansions within the post-Newtonian framework and (B) numerical simulations of the full relativistic dynamics. We analyze various strategies to construct complete hybrid waveforms that consist of a post-Newtonian inspiral part matched to numerical-relativity data. We elaborate on existing approaches for nonspinning systems by extending the accessible parameter space and introducing an alternative scheme based in the Fourier domain. Our methods can now be readily applied to multiple spherical-harmonic modes and precessing systems. 
In addition to that, we analyze in detail the accuracy of hybrid waveforms with the goal to quantify how numerous sources of error in the approximation techniques affect the application of such templates in real gravitational-wave searches. This is of major importance for the future construction of improved models, but also for the correct interpretation of gravitational-wave observations that are made utilizing any complete waveform family. In particular, we comprehensively discuss how long the numerical-relativity contribution to the signal has to be in order to make the resulting hybrids accurate enough, and for currently feasible simulation lengths we assess the physics one can potentially do with template-based searches.}, language = {en} }