@phdthesis{Giuri2023, author = {Giuri, Chiara}, title = {VERITAS Dark Matter search in dwarf spheroidal galaxies: an extended analysis}, doi = {10.25932/publishup-57586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-575869}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 227}, year = {2023}, abstract = {In the last century, several astronomical measurements have indicated that a significant fraction (about 22\%) of the total mass of the Universe, on galactic and extragalactic scales, is composed of a mysterious "dark" matter (DM). DM does not interact via the electromagnetic force; in other words, it does not reflect, absorb, or emit light. It is possible that DM particles are weakly interacting massive particles (WIMPs) that can annihilate (or decay) into Standard Model (SM) particles, and modern very-high-energy (VHE; > 100 GeV) instruments such as imaging atmospheric Cherenkov telescopes (IACTs) can play an important role in constraining the main properties of such DM particles by detecting these products. Among the most promising targets in which to look for a DM signal are dwarf spheroidal galaxies (dSphs), as they are expected to be highly DM-dominated objects with a clean, gas-free environment. Given the angular resolution of IACTs, some dSphs should be treated as extended sources, and their extended emission is within the reach of these instruments. For this reason, we performed an extended-source analysis, taking into account both the energy and the angular extent of the observed events in the unbinned maximum likelihood estimation. The goal was to set more constraining upper limits on the velocity-averaged annihilation cross-section of WIMPs with VERITAS data. VERITAS is an array of four IACTs, able to detect γ-ray photons with energies between 100 GeV and 30 TeV. The results of this extended analysis were compared against the traditional spectral analysis. We found that a 2D analysis can lead to more constraining results, depending on the DM mass, channel, and source. Moreover, this thesis also presents the results of a multi-instrument project whose goal was to combine already published data on 20 dSphs from five different experiments (Fermi-LAT, MAGIC, H.E.S.S., VERITAS, and HAWC) in order to set upper limits on the WIMP annihilation cross-section over the widest mass range ever reported.}, language = {en} } @phdthesis{Goswami2014, author = {Goswami, Bedartha}, title = {Uncertainties in climate data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Scientific inquiry requires that we formulate not only what we know, but also what we do not know and by how much. In climate data analysis, this involves an accurate specification of measured quantities and a consequent analysis that consciously propagates the measurement errors at each step. The dissertation presents a thorough analytical method to quantify errors of measurement inherent in paleoclimate data. An additional focus is the uncertainty in assessing the coupling between different factors that influence the global mean temperature (GMT). Paleoclimate studies critically rely on `proxy variables' that record climatic signals in natural archives. However, such proxy records inherently involve uncertainties in determining the age of the signal.
We present a generic Bayesian approach to analytically determine the proxy record along with its associated uncertainty, resulting in a time-ordered sequence of correlated probability distributions rather than a precise time series. We further develop a recurrence-based method to detect dynamical events from the proxy probability distributions. The methods are validated with synthetic examples and demonstrated with real-world proxy records. The proxy estimation step reveals the interrelations between proxy variability and uncertainty. The recurrence analysis of the East Asian Summer Monsoon during the last 9000 years confirms the well-known `dry' events at 8200 and 4400 BP, and reveals an additional significantly dry event at 6900 BP. We also analyze the network of dependencies surrounding GMT. We find an intricate, directed network with multiple links between the different factors at multiple time delays. We further uncover a significant feedback from the GMT to the El Ni{\~n}o Southern Oscillation at quasi-biennial timescales. The analysis highlights the need for a more nuanced formulation of the influences between different climatic factors, as well as the limitations of trying to estimate such dependencies.}, language = {en} } @phdthesis{Bergner2011, author = {Bergner, Andr{\'e}}, title = {Synchronization in complex systems with multiple time scales}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53407}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present work, synchronization phenomena in complex dynamical systems exhibiting multiple time scales have been analyzed. Multiple time scales can be active in different ways. Three different systems have been analyzed with different methods from data analysis. The first system studied is a large heterogeneous network of bursting neurons, that is, a system with two predominant time scales: the fast firing of action potentials (spikes) and bursts of repetitive spikes followed by a quiescent phase. This system has been integrated numerically and analyzed with methods based on recurrence in phase space. An interesting result is the different transitions to synchrony found on the two distinct time scales. Moreover, an anomalous synchronization effect can be observed on the fast time scale, i.e. there is a range of the coupling strength where desynchronization occurs. The second system, analyzed numerically as well as experimentally, is a pair of coupled CO₂ lasers in a chaotic bursting regime. This system is interesting due to its similarity to epidemic models. We explain the bursts by different time scales generated from unstable periodic orbits embedded in the chaotic attractor and perform a synchronization analysis of these different orbits utilizing the continuous wavelet transform. We find diverse routes to synchrony for these different observed time scales. The last system studied is a small network motif of limit cycle oscillators. Specifically, we have studied a hub motif, which serves as an elementary building block for scale-free networks, a type of network found in many real-world applications. These hubs are of special importance for communication and information transfer in complex networks. Here, a detailed study of the mechanism of synchronization in oscillatory networks with a broad frequency distribution has been carried out. In particular, we find a remote synchronization of nodes in the network which are not directly coupled. We also explain the responsible mechanism and its limitations and constraints.
Furthermore, we derive an analytic expression for it and show that information transmission in pure phase oscillators, such as those of the Kuramoto type, is limited. In addition to the numerical and analytic analysis, an experiment consisting of electrical circuits has been designed. The obtained results confirm the former findings.}, language = {en} } @phdthesis{Thiel2004, author = {Thiel, Marco}, title = {Recurrences : exploiting naturally occurring analogues}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001633}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {In the present work, recurrence in phase space is exploited. Three main results are discussed. 1. Recurrence allows the predictability of the system to be quantified. 2. Recurrence contains (under certain conditions) all relevant information about the dynamics in phase space. 3. Recurrence allows the generation of dynamical surrogate data.}, language = {en} } @phdthesis{Mayer2014, author = {Mayer, Michael}, title = {Pulsar wind nebulae at high energies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71504}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2014}, abstract = {Pulsar wind nebulae (PWNe) are the most abundant TeV gamma-ray emitters in the Milky Way. The radiative emission of these objects is powered by fast-rotating pulsars, which convert part of their rotational energy into winds of relativistic particles. This thesis presents an in-depth study of the detected population of PWNe at high energies. To outline general trends regarding their evolutionary behaviour, a time-dependent model is introduced and compared to the available data. In particular, this work presents two exceptional PWNe which stand out from the rest of the population, namely the Crab Nebula and N 157B. Both objects are driven by pulsars with extremely high rotational energy loss rates. Accordingly, they are often referred to as energetic twins. Modelling the non-thermal multi-wavelength emission of N 157B gives access to specific properties of this object, such as the magnetic field inside the nebula. Comparing the derived parameters to those of the Crab Nebula reveals large intrinsic differences between the two PWNe. Possible origins of these differences are discussed in the context of the two closely resembling pulsars. Compared to the TeV gamma-ray regime, the number of detected PWNe is much smaller in the MeV-GeV gamma-ray range. In the latter range, the Crab Nebula stands out through the recent detection of gamma-ray flares. In general, the measured flux enhancements on short time scales of days to weeks were not expected from the theoretical understanding of PWNe. In this thesis, the variability of the Crab Nebula is analysed using data from the Fermi Large Area Telescope (Fermi-LAT). For the presented analysis, a new gamma-ray reconstruction method is used, providing a higher sensitivity and a lower energy threshold compared to previous analyses. The derived gamma-ray light curve of the Crab Nebula is investigated for flares and periodicity. The detected flares are analysed regarding their energy spectra, and their variety and commonalities are discussed. In addition, a dedicated analysis of the flare which occurred in March 2013 is performed. The derived short-term variability time scale is roughly 6 h, implying that a small region inside the Crab Nebula is responsible for the enigmatic flares.
The most promising theories explaining the origins of the flux eruptions and gamma-ray variability are discussed in detail. In the technical part of this work, a new analysis framework is presented. The introduced software, called gammalib/ctools, is currently being developed for the future CTA observatory. The analysis framework is extensively tested using data from the H.E.S.S. experiment. To conduct proper data analysis in the likelihood framework of gammalib/ctools, a model describing the distribution of background events in H.E.S.S. data is presented. The software provides the infrastructure to combine data from several instruments in one analysis. To study the gamma-ray emitting PWN population, data from Fermi-LAT and H.E.S.S. are combined in the likelihood framework of gammalib/ctools. In particular, the spectral peak, which usually lies in the overlapping energy regime of these two instruments, is determined with the presented analysis framework. The derived measurements are compared to the predictions of the time-dependent model. The combined analysis supports the conclusion of a diverse population of gamma-ray emitting PWNe.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results for this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold ever reached in ground-based γ-ray astronomy, opening a new window onto the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Rosenblum2003, author = {Rosenblum, Michael}, title = {Phase synchronization of chaotic systems : from theory to experimental applications}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000682}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In a classical context, synchronization means the adjustment of the rhythms of self-sustained periodic oscillators due to their weak interaction. The notion of synchronization goes back to the famous Dutch scientist Christiaan Huygens, who in the 17th
century reported on his observations of pendulum clocks. When two such clocks were placed on the same support, their pendulums swung in perfect agreement. Mathematically, this means that, as a consequence of the coupling, the clocks began to oscillate with equal frequencies and closely related phases. Although it is probably the oldest observed nonlinear effect, synchronization was only understood after the work of E. V. Appleton and B. Van der Pol around 1920, who systematically studied synchronization in triode generators. Since then, the theory has been well developed and has found many applications. Today it is known that certain, even rather simple, systems can exhibit chaotic behaviour. This means that their rhythms are irregular and cannot be characterized by a single frequency. As shown in this habilitation thesis, the notion of phase, and thus also of synchronization, can nevertheless be extended to chaotic systems. Due to a very weak interaction, relations between the phases and the averaged frequencies arise and lead to an agreement of the still irregular rhythms. This effect, the so-called phase synchronization, was later confirmed in laboratory experiments by other research groups. The understanding of the synchronization of irregular oscillators allowed us to address important problems of data analysis. A key example is the problem of identifying weak interactions between systems that allow only passive measurements. This situation is frequently encountered in living systems, where synchronization phenomena appear on every level - from the level of cells up to macroscopic physiological systems; in normal states as well as in states of severe pathology. With our methods we were able to detect an adjustment of the rhythms of the cardiovascular and respiratory systems in humans, with the degree of their interaction increasing with maturation. Furthermore, we used our algorithms to analyse the brain activity of patients suffering from Parkinson's disease. The results of this collaboration with neuroscientists show that different brain areas synchronize just before the onset of pathological tremor. Moreover, we succeeded in localizing the brain region responsible for the tremor.}, language = {en} } @misc{Dietrich2008, type = {Master Thesis}, author = {Dietrich, Jan Philipp}, title = {Phase Space Reconstruction using the frequency domain : a generalization of actual methods}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50738}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Phase Space Reconstruction is a method that allows the phase space of a system to be reconstructed using only a one-dimensional time series as input. It can be used for calculating Lyapunov exponents and detecting chaos, it helps in understanding complex dynamics and their behaviour, and it can reproduce datasets that were not measured. There are many different methods which produce correct reconstructions, such as time delay, Hilbert transformation, differentiation, and integration. The most widely used is the time-delay method, but all methods have particular properties which are useful in different situations.
Hence, every reconstruction method has situations in which it is the best choice. Looking at all these different methods, two questions arise: Why can all these different-looking methods be used for the same purpose? Is there any connection between all these functions? The answer is found in the frequency domain: after a Fourier transformation, all these methods take on a similar shape. Every presented reconstruction method can be described as a multiplication in the frequency domain with a frequency-dependent reconstruction function. This structure is also known as a filter. From this point of view, every reconstructed dimension can be seen as a filtered version of the measured time series. It contains the original data but with a new emphasis: some parts are amplified and other parts are reduced. Furthermore, I show that not every function can be used for reconstruction. In the thesis, three characteristics are identified which are mandatory for a reconstruction function. Under these restrictions, one obtains a whole family of new reconstruction functions. It is thus possible to reduce noise within the reconstruction process itself, or to exploit advantages of already known reconstruction methods while suppressing their unwanted characteristics.}, language = {en} } @misc{DrielGesztelyiBakerToeroeketal.2013, author = {Driel-Gesztelyi, L. van and Baker, Daniel N. and T{\"o}r{\"o}k, Tibor and Pariat, Etienne and Green, L. M. and Williams, D. R. and Carlyle, J. and Valori, G. and D{\'e}moulin, Pascal and Matthews, S. A. and Kliem, Bernhard and Malherbe, J.-M.}, title = {Magnetic reconnection driven by filament eruption in the 7 June 2011 event}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {608}, doi = {10.25932/publishup-41567}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415671}, pages = {502 -- 503}, year = {2013}, abstract = {During an unusually massive filament eruption on 7 June 2011, SDO/AIA imaged for the first time significant EUV emission around a magnetic reconnection region in the solar corona. The reconnection occurred between the magnetic fields of the laterally expanding CME and a neighbouring active region. A pre-existing quasi-separatrix layer was activated in the process. This scenario is supported by data-constrained numerical simulations of the eruption. Observations show that dense, cool filament plasma was re-directed and heated in situ, producing coronal-temperature emission around the reconnection region. These results provide the first direct observational evidence, supported by MHD simulations and magnetic modelling, that a large-scale re-configuration of the coronal magnetic field takes place during solar eruptions via the process of magnetic reconnection.}, language = {en} } @phdthesis{Duy2023, author = {Duy, Nguyen Le}, title = {Hydrological processes in the Vietnamese Mekong Delta}, doi = {10.25932/publishup-60260}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-602607}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 153}, year = {2023}, abstract = {Understanding hydrological processes is of fundamental importance for Vietnamese national food security and the livelihood of the population in the Vietnamese Mekong Delta (VMD).
As a consequence of sparse data in this region, however, hydrological processes, such as the controlling processes of precipitation, the interaction between surface water and groundwater, and groundwater dynamics, have not been thoroughly studied. This lack of knowledge may negatively impact the long-term strategic planning for sustainable groundwater resources management and may result in insufficient groundwater recharge and freshwater scarcity. It is essential to develop useful methods for a better understanding of hydrological processes in such data-sparse regions. The goal of this dissertation is to advance methodologies that can improve the understanding of fundamental hydrological processes in the VMD, based on the analyses of stable water isotopes and monitoring data. The thesis mainly focuses on the controlling processes of precipitation, the mechanism of surface-groundwater interaction, and the groundwater dynamics. These processes have not been fully addressed in the VMD so far. The thesis is based on statistical analyses of isotopic data from the Global Network of Isotopes in Precipitation (GNIP), of meteorological and hydrological data from Vietnamese agencies, and of the stable water isotopes and monitoring data collected as part of this work. First, the controlling processes of precipitation were quantified by the combination of trajectory analysis, multi-factor linear regression, and relative importance analysis (hereafter, a model-based statistical approach). The validity of this approach is confirmed by similar, but mainly qualitative, results obtained in other studies. The total variation in precipitation isotopes (δ18O and δ2H) can be better explained by multiple linear regression (up to 80\%) than by single-factor linear regression (30\%). The relative importance analysis indicates that precipitation isotopes are controlled by atmospheric moisture regimes rather than by local climatic conditions. The most crucial factor is the upstream rainfall along the trajectories of air mass movement. However, the influences of regional and local climatic factors vary in importance over the seasons. The developed model-based statistical approach is a robust tool for the interpretation of precipitation isotopes and could also be applied to understand the controlling processes of precipitation in other regions. Second, the concept of the two-component lumped-parameter model (LPM) in conjunction with stable water isotopes was applied to examine the surface-groundwater interaction in the VMD. A calibration framework was also set up to evaluate the behaviour, parameter identifiability, and uncertainties of two-component LPMs. The modelling results provided insights into the subsurface flow conditions, the recharge contributions, and the spatial variation of groundwater transit time. The subsurface flow conditions at the study site are best represented by the linear-piston flow distribution. The contributions of the recharge sources change with distance to the river. The mean transit time (mTT) of riverbank infiltration increases with the length of the horizontal flow path and the decreasing gradient between river and groundwater. River water infiltrates horizontally mainly via the highly permeable aquifer, resulting in short mTTs (<40 weeks) for locations close to the river (<200 m). The vertical infiltration from precipitation takes place primarily via a low-permeability overlying aquitard, resulting in considerably longer mTTs (>80 weeks).
Notably, the transit time of precipitation infiltration is independent of the distance to the river. All these results are hydrologically plausible and could be quantified by the presented method for the first time. This study indicates that the highly complex mechanism of surface-groundwater interaction in riverbank infiltration systems can be conceptualized by exploiting two-component LPMs. It is illustrated that the model concept can be used as a tool to investigate the hydrological functioning of mixing processes and the flow paths of multiple water components in riverbank infiltration systems. Lastly, a suite of time series analysis approaches was applied to examine the groundwater dynamics in the VMD. The assessment focused on the time-variant trends of groundwater levels (GWLs), the groundwater memory effect (representing the time that an aquifer holds water), and the hydraulic response between surface water and multi-layer alluvial aquifers. The analysis indicates that the aquifers act as low-pass filters that reduce the high-frequency signals in the GWL variations and limit the recharge to the deep groundwater. Groundwater abstraction exceeded groundwater recharge between 1997 and 2017, leading to the decline of groundwater levels (0.01-0.55 m/year) in all considered aquifers in the VMD. The memory effect varies according to the geographical location, being shorter in shallow aquifers and flood-prone areas and longer in deep aquifers and coastal regions. Groundwater depth, season, and location primarily control the variation of the response time between the river and the alluvial aquifers. These findings are important contributions to the hydrogeological literature of a little-known groundwater system in an alluvial setting. It is suggested that time series analysis can be used as an efficient tool to understand groundwater systems where resources are insufficient to develop a physically based groundwater model. This doctoral thesis demonstrates that important aspects of hydrological processes can be understood by statistical analysis of stable water isotope and monitoring data. The approaches developed in this thesis can be easily transferred to regions in similar tropical environments, particularly those in alluvial settings. The results of the thesis can be used as a baseline for future isotope-based studies and contribute to the hydrogeological literature of little-known groundwater systems in the VMD.}, language = {en} }