@article{DinevaPearsonIlyinetal.2022, author = {Dineva, Ekaterina and Pearson, Jeniveve and Ilyin, Ilya and Verma, Meetu and Diercke, Andrea and Strassmeier, Klaus and Denker, Carsten}, title = {Characterization of chromospheric activity based on Sun-as-a-star spectral and disk-resolved activity indices}, series = {Astronomische Nachrichten = Astronomical notes}, volume = {343}, journal = {Astronomische Nachrichten = Astronomical notes}, number = {5}, publisher = {Wiley-VCH}, address = {Weinheim}, issn = {0004-6337}, doi = {10.1002/asna.20223996}, pages = {23}, year = {2022}, abstract = {The strong chromospheric absorption lines Ca ii H \& K are tightly connected to stellar surface magnetic fields. Only for the Sun can spectral activity indices be related to evolving magnetic features on the solar disk. The Solar Disk-Integrated (SDI) telescope feeds the Potsdam Echelle Polarimetric and Spectroscopic Instrument (PEPSI) of the Large Binocular Telescope (LBT) at Mt. Graham International Observatory, Arizona, U.S.A. We present high-resolution, high-fidelity spectra that were recorded on 184 \& 82 days in 2018 \& 2019 and derive the Ca ii H \& K emission ratio, that is, the S-index. In addition, we compile excess brightness and area indices based on full-disk Ca ii K-line-core filtergrams of the Chromospheric Telescope (ChroTel) at Observatorio del Teide, Tenerife, Spain, and full-disk ultraviolet (UV) 1600 angstrom images of the Atmospheric Imaging Assembly (AIA) onboard the Solar Dynamics Observatory (SDO). Thus, Sun-as-a-star spectral indices are related to their counterparts derived from resolved images of the solar chromosphere. All indices display signatures of rotational modulation, even during the very low magnetic activity in the minimum of Solar Cycle 24. Bringing together different types of activity indices has the potential to join disparate chromospheric datasets, yielding a comprehensive description of chromospheric activity across many solar cycles.}, language = {en} } @article{AdnanSrsicVenticichetal.2020, author = {Adnan, Hassan Sami and Srsic, Amanda and Venticich, Pete Milos and Townend, David M.R.}, title = {Using AI for mental health analysis and prediction in school surveys}, series = {European journal of public health}, volume = {30}, journal = {European journal of public health}, publisher = {Oxford Univ. Press}, address = {Oxford [u.a.]}, issn = {1101-1262}, doi = {10.1093/eurpub/ckaa165.336}, pages = {V125 -- V125}, year = {2020}, abstract = {Background: Childhood and adolescence are critical stages of life for mental health and well-being. Schools are a key setting for mental health promotion and illness prevention. One in five children and adolescents has a mental disorder, and about half of all mental disorders begin before the age of 14. Beneficial and explainable artificial intelligence can replace current paper-based and online approaches to school mental health surveys. This can enhance data acquisition, interoperability, data-driven analysis, trust, and compliance. This paper presents a model for using chatbots for non-obtrusive data collection and supervised machine learning models for data analysis, and discusses ethical considerations pertaining to the use of these models. Methods: For data acquisition, the proposed model uses chatbots which interact with students. The conversation log acts as the source of raw data for the machine learning models. Pre-processing of the data is automated by filtering for keywords and phrases.
Existing survey results, obtained through current paper-based data collection methods, are evaluated by domain experts (health professionals). These can be used to create a test dataset to validate the machine learning models. Supervised learning can then be deployed to classify specific behaviour and mental health patterns. Results: We present a model that can be used to improve upon current paper-based data collection and manual data analysis methods. An open-source GitHub repository contains the necessary tools and components of this model. Privacy is respected through rigorous observance of confidentiality and data protection requirements. Critical reflection on these ethical and legal aspects is included in the project. Conclusions: This model strengthens mental health surveillance in schools. The same tools and components could be applied to other public health data. Future extensions of this model could also incorporate unsupervised learning to find clusters and patterns of unknown effects.}, language = {en} } @phdthesis{Duy2023, author = {Duy, Nguyen Le}, title = {Hydrological processes in the Vietnamese Mekong Delta}, doi = {10.25932/publishup-60260}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-602607}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 153}, year = {2023}, abstract = {Understanding hydrological processes is of fundamental importance for Vietnamese national food security and the livelihood of the population in the Vietnamese Mekong Delta (VMD). As a consequence of sparse data in this region, however, hydrologic processes, such as the controlling processes of precipitation, the interaction between surface water and groundwater, and groundwater dynamics, have not been thoroughly studied. The lack of this knowledge may negatively impact long-term strategic planning for sustainable groundwater resources management and may result in insufficient groundwater recharge and freshwater scarcity. It is essential to develop useful methods for a better understanding of hydrological processes in such data-sparse regions. The goal of this dissertation is to advance methodologies that can improve the understanding of fundamental hydrological processes in the VMD, based on the analyses of stable water isotopes and monitoring data. The thesis mainly focuses on the controlling processes of precipitation, the mechanism of surface-groundwater interaction, and the groundwater dynamics. These processes have not been fully addressed in the VMD so far. The thesis is based on statistical analyses of the isotopic data of the Global Network of Isotopes in Precipitation (GNIP), of meteorological and hydrological data from Vietnamese agencies, and of the stable water isotopes and monitoring data collected as part of this work. First, the controlling processes of precipitation were quantified by the combination of trajectory analysis, multi-factor linear regression, and relative importance analysis (hereafter, a model-based statistical approach). The validity of this approach is confirmed by similar, but mainly qualitative, results obtained in other studies. The total variation in precipitation isotopes (δ18O and δ2H) can be better explained by multiple linear regression (up to 80\%) than by single-factor linear regression (30\%). The relative importance analysis indicates that atmospheric moisture regimes, rather than local climatic conditions, control precipitation isotopes. The most crucial factor is the upstream rainfall along the trajectories of air mass movement.
However, the influences of regional and local climatic factors vary in importance over the seasons. The developed model-based statistical approach is a robust tool for the interpretation of precipitation isotopes and could also be applied to understand the controlling processes of precipitation in other regions. Second, the concept of the two-component lumped-parameter model (LPM) in conjunction with stable water isotopes was applied to examine the surface-groundwater interaction in the VMD. A calibration framework was also set up to evaluate the behaviour, parameter identifiability, and uncertainties of two-component LPMs. The modelling results provided insights into the subsurface flow conditions, the recharge contributions, and the spatial variation of groundwater transit time. The subsurface flow conditions at the study site can be best represented by the linear-piston flow distribution. The contributions of the recharge sources change with distance to the river. The mean transit time (mTT) of riverbank infiltration increases with the length of the horizontal flow path and with the decreasing gradient between river and groundwater. River water infiltrates horizontally mainly via the highly permeable aquifer, resulting in short mTTs (<40 weeks) for locations close to the river (<200 m). The vertical infiltration from precipitation takes place primarily via a low-permeability overlying aquitard, resulting in considerably longer mTTs (>80 weeks). Notably, the transit time of precipitation infiltration is independent of the distance to the river. All these results are hydrologically plausible and could be quantified by the presented method for the first time. This study indicates that the highly complex mechanism of surface-groundwater interaction at riverbank infiltration systems can be conceptualized by exploiting two-component LPMs. It is illustrated that the model concept can be used as a tool to investigate the hydrological functioning of mixing processes and the flow paths of multiple water components in riverbank infiltration systems. Lastly, a suite of time series analysis approaches was applied to examine the groundwater dynamics in the VMD. The assessment focused on the time-variant trends of groundwater levels (GWLs), the groundwater memory effect (representing the time that an aquifer holds water), and the hydraulic response between surface water and multi-layer alluvial aquifers. The analysis indicates that the aquifers act as low-pass filters that reduce the high-frequency signals in the GWL variations and limit the recharge to the deep groundwater. Groundwater abstraction exceeded groundwater recharge between 1997 and 2017, leading to a decline of groundwater levels (0.01-0.55 m/year) in all considered aquifers of the VMD. The memory effect varies according to the geographical location, being shorter in shallow aquifers and flood-prone areas and longer in deep aquifers and coastal regions. Groundwater depth, season, and location primarily control the variation of the response time between the river and the alluvial aquifers. These findings are important contributions to the hydrogeological literature of a little-known groundwater system in an alluvial setting. It is suggested that time series analysis can be used as an efficient tool to understand groundwater systems where resources are insufficient to develop a physically based groundwater model.
This doctoral thesis demonstrates that important aspects of hydrological processes can be understood by statistical analysis of stable water isotope and monitoring data. The approaches developed in this thesis can be easily transferred to regions in similar tropical environments, particularly those in alluvial settings. The results of the thesis can be used as a baseline for future isotope-based studies and contribute to the hydrogeological literature of little-known groundwater systems in the VMD.}, language = {en} } @phdthesis{Antonelli2021, author = {Antonelli, Andrea}, title = {Accurate waveform models for gravitational-wave astrophysics: synergetic approaches from analytical relativity}, doi = {10.25932/publishup-57667}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576671}, school = {Universit{\"a}t Potsdam}, pages = {XII, 259, LXXV}, year = {2021}, abstract = {Gravitational-wave (GW) astrophysics is a field in full blossom. Since the landmark detection of GWs from a binary black hole on September 14th 2015, fifty-two compact-object binaries have been reported by the LIGO-Virgo collaboration. Such events carry astrophysical and cosmological information, ranging from how black holes and neutron stars are formed, to what neutron stars are composed of and how the Universe expands, and they allow general relativity to be tested in the highly dynamical strong-field regime. It is the goal of GW astrophysics to extract such information as accurately as possible. Yet, this is only possible if the tools and technology used to detect and analyze GWs are advanced enough. Waveform models are a key aspect of GW searches: they encapsulate our best predictions for the gravitational radiation under a certain set of parameters and need to be cross-correlated with data to extract GW signals. Waveforms must be very accurate to avoid missing important physics in the data, which might be the key to answering the fundamental questions of GW astrophysics. The continuous improvements of the current LIGO-Virgo detectors, the development of next-generation ground-based detectors such as the Einstein Telescope or the Cosmic Explorer, as well as the development of the Laser Interferometer Space Antenna (LISA), demand accurate waveform models. While available models are sufficient to capture the low-spin, comparable-mass binaries routinely detected in LIGO-Virgo searches, those for sources from both current and next-generation ground-based and spaceborne detectors must be accurate enough to detect binaries with large spins and asymmetry in the masses. Moreover, the thousands of sources that we expect to detect with future detectors demand accurate waveforms to mitigate biases in the estimation of signals' parameters due to the presence of a foreground of many sources that overlap in the frequency band. This is recognized as one of the biggest challenges for the analysis of future detectors' data, since biases might hinder the extraction of important astrophysical and cosmological information. In the first part of this thesis, we discuss how to improve waveform models for binaries with high spins and asymmetry in the masses. In the second, we present the first generic metrics that have been proposed to predict biases in the presence of a foreground of many overlapping signals in GW data. For the first task, we will focus on several classes of analytical techniques.
Current models for LIGO and Virgo studies are based on the post-Newtonian (PN, weak-field, small velocities) approximation that is most natural for the bound orbits that are routinely detected in GW searches. However, two other approximations have risen to prominence: the post-Minkowskian (PM, weak-field only) approximation, natural for unbound (scattering) orbits, and the small-mass-ratio (SMR) approximation, typical of binaries in which the mass of one body is much bigger than the other. These are most appropriate to binaries with high asymmetry in the masses that challenge current waveform models. Moreover, they allow one to "cover" regions of the parameter space of coalescing binaries, thereby improving the interpolation (and faithfulness) of waveform models. The analytical approximations to the relativistic two-body problem can synergistically be included within the effective-one-body (EOB) formalism, in which the two-body information from each approximation can be recast into an effective problem of a mass orbiting a deformed Schwarzschild (or Kerr) black hole. The hope is that the resultant models can cover both the low-spin comparable-mass binaries that are routinely detected, and the ones that challenge current models. The first part of this thesis is dedicated to a study of how best to incorporate information from the PN, PM, SMR and EOB approaches in a synergistic way. We also discuss how accurate the resulting waveforms are, as compared against numerical-relativity (NR) simulations. We begin by comparing PM models, whether alone or recast in the EOB framework, against PN models and NR simulations. We will show that PM information has the potential to improve currently employed models for LIGO and Virgo, especially if recast within the EOB formalism. This is very important, as the PM approximation comes with a host of new computational techniques from particle physics to exploit. Then, we show how a combination of PM and SMR approximations can be employed to access previously unknown PN orders, deriving the third subleading PN dynamics for spin-orbit and (aligned) spin1-spin2 couplings. Such new results can then be included in the EOB models currently used in GW searches and parameter estimation studies, thereby improving them when the binaries have high spins. Finally, we build an EOB model for quasi-circular nonspinning binaries based on the SMR approximation (rather than the PN one as usually done). We show how this is done in detail without incurring the divergences that had affected previous attempts, and compare the resultant model against NR simulations. We find that the SMR approximation is an excellent approximation for all (quasi-circular nonspinning) binaries, including both the equal-mass binaries that are routinely detected in GW searches and the ones with highly asymmetric masses. In particular, the SMR-based models agree much better with NR simulations than the PN-based ones, suggesting that SMR-informed EOB models might be the key to modelling binaries in the future. In the second task of this thesis, we work within the linear-signal approximation and describe generic metrics to predict inference biases on the parameters of a GW source of interest in the presence of confusion noise from unfitted foregrounds and from residuals of other signals that have been incorrectly fitted out. We illustrate the formalism with simple (yet realistic) LISA sources, and demonstrate its validity against Monte Carlo simulations.
The metrics we describe pave the way for more realistic studies to quantify the biases with future ground-based and spaceborne detectors.}, language = {en} } @phdthesis{Giuri2023, author = {Giuri, Chiara}, title = {VERITAS Dark Matter search in dwarf spheroidal galaxies: an extended analysis}, doi = {10.25932/publishup-57586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-575869}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 227}, year = {2023}, abstract = {In the last century, several astronomical measurements have supported the hypothesis that a significant percentage (about 22\%) of the total mass of the Universe, on galactic and extragalactic scales, is composed of a mysterious "dark" matter (DM). DM does not interact with the electromagnetic force; in other words, it does not reflect, absorb, or emit light. It is possible that DM particles are weakly interacting massive particles (WIMPs) that can annihilate (or decay) into Standard Model (SM) particles, and modern very-high-energy (VHE; > 100 GeV) instruments such as imaging atmospheric Cherenkov telescopes (IACTs) can play an important role in constraining the main properties of such DM particles by detecting these products. Among the most favourable targets in which to look for a DM signal are dwarf spheroidal galaxies (dSphs), as they are expected to be highly DM-dominated objects with a clean, gas-free environment. Given the angular resolution of IACTs, some dSphs should be considered extended sources, as this resolution is adequate to detect extended emission from them. For this reason, we performed an extended-source analysis, taking into account both the energy and the angular extension of the observed events in the unbinned maximum-likelihood estimation. The goal was to set more stringent upper limits on the velocity-averaged annihilation cross-section of WIMPs with VERITAS data. VERITAS is an array of four IACTs, able to detect γ-ray photons ranging between 100 GeV and 30 TeV. The results of this extended analysis were compared against the traditional spectral analysis. We found that a 2D analysis may lead to more constraining results, depending on the DM mass, channel, and source. Moreover, this thesis also presents the results of a multi-instrument project, whose goal was to combine already published data on 20 dSphs from five different experiments (Fermi-LAT, MAGIC, H.E.S.S., VERITAS, and HAWC) in order to set upper limits on the WIMP annihilation cross-section over the widest mass range ever reported.}, language = {en} } @article{TopcuFruehwirthMoseretal.2018, author = {Top{\c{c}}u, {\c{C}}ağda{\c{s}} and Fr{\"u}hwirth, Matthias and Moser, Maximilian and Rosenblum, Michael and Pikovskij, Arkadij}, title = {Disentangling respiratory sinus arrhythmia in heart rate variability records}, series = {Physiological Measurement}, volume = {39}, journal = {Physiological Measurement}, number = {5}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {0967-3334}, doi = {10.1088/1361-6579/aabea4}, pages = {12}, year = {2018}, abstract = {Objective: Several different measures of heart rate variability, and particularly of respiratory sinus arrhythmia, are widely used in research and clinical applications. For many purposes it is important to know which features of heart rate variability are directly related to respiration and which are caused by other aspects of cardiac dynamics.
Approach: Inspired by ideas from the theory of coupled oscillators, we use simultaneous measurements of respiratory and cardiac activity to perform a nonlinear disentanglement of the heart rate variability into the respiratory-related component and the rest. Main results: The theoretical consideration is illustrated by the analysis of 25 data sets from healthy subjects. In all cases we show how the disentanglement is manifested in the different measures of heart rate variability. Significance: The suggested technique can be exploited as a universal preprocessing tool, both for the analysis of respiratory influence on the heart rate and in cases when effects of other factors on the heart rate variability are in focus.}, language = {en} } @article{VermaKummerowDenker2018, author = {Verma, Meetu and Kummerow, P. and Denker, Carsten}, title = {On the extent of the moat flow in axisymmetric sunspots}, series = {Astronomische Nachrichten = Astronomical notes}, volume = {339}, journal = {Astronomische Nachrichten = Astronomical notes}, number = {4}, publisher = {Wiley-VCH}, address = {Weinheim}, issn = {0004-6337}, doi = {10.1002/asna.201813482}, pages = {268 -- 276}, year = {2018}, abstract = {Unipolar, axisymmetric sunspots are figuratively called "theoretician's sunspots" because their simplicity supposedly makes them more suitable for theoretical descriptions or numerical models. On November 18, 2013, a very large specimen (active region NOAA 11899) crossed the central meridian of the Sun. The moat flow associated with this very large spot is quantitatively compared to that of a medium and a small sunspot to determine the extent of the moat flow in different environments. We employ continuum images and magnetograms of the Helioseismic and Magnetic Imager (HMI) as well as ultraviolet (UV) images at λ = 160 nm of the Atmospheric Imaging Assembly (AIA), both on board the Solar Dynamics Observatory (SDO), to measure horizontal proper motions with Local Correlation Tracking (LCT) and flux transport velocities with the Differential Affine Velocity Estimator (DAVE). We compute time-averaged flow maps (±6 hr around meridian passage) and radial averages of photometric, magnetic, and flow properties. Flow fields of a small- and a medium-sized axisymmetric sunspot provide the context for interpreting the results. All sunspots show outward moat flow and the advection of moving magnetic features (MMFs). However, the extent of the moat flow varies from spot to spot, and a correlation of flow properties with size is tenuous, if at all present.
The moat flow is asymmetric and predominantly in the east-west direction, with deviations related to the tilt angle of the sunspot group as well as to the topology and activity level of the trailing plage.}, language = {en} } @misc{TopcuFruehwirthMoseretal.2018, author = {Top{\c{c}}u, {\c{C}}ağda{\c{s}} and Fr{\"u}hwirth, Matthias and Moser, Maximilian and Rosenblum, Michael and Pikovskij, Arkadij}, title = {Disentangling respiratory sinus arrhythmia in heart rate variability records}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {913}, issn = {1866-8372}, doi = {10.25932/publishup-43631}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436315}, pages = {15}, year = {2018}, abstract = {Objective: Several different measures of heart rate variability, and particularly of respiratory sinus arrhythmia, are widely used in research and clinical applications. For many purposes it is important to know which features of heart rate variability are directly related to respiration and which are caused by other aspects of cardiac dynamics. Approach: Inspired by ideas from the theory of coupled oscillators, we use simultaneous measurements of respiratory and cardiac activity to perform a nonlinear disentanglement of the heart rate variability into the respiratory-related component and the rest. Main results: The theoretical consideration is illustrated by the analysis of 25 data sets from healthy subjects. In all cases we show how the disentanglement is manifested in the different measures of heart rate variability. Significance: The suggested technique can be exploited as a universal preprocessing tool, both for the analysis of respiratory influence on the heart rate and in cases when effects of other factors on the heart rate variability are in focus.}, language = {en} } @misc{DrielGesztelyiBakerToeroeketal.2013, author = {Driel-Gesztelyi, L. van and Baker, Daniel N. and T{\"o}r{\"o}k, Tibor and Pariat, Etienne and Green, L. M. and Williams, D. R. and Carlyle, J. and Valori, G. and D{\´e}moulin, Pascal and Matthews, S. A. and Kliem, Bernhard and Malherbe, J.-M.}, title = {Magnetic reconnection driven by filament eruption in the 7 June 2011 event}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {608}, doi = {10.25932/publishup-41567}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415671}, pages = {502 -- 503}, year = {2013}, abstract = {During an unusually massive filament eruption on 7 June 2011, SDO/AIA imaged for the first time significant EUV emission around a magnetic reconnection region in the solar corona. The reconnection occurred between magnetic fields of the laterally expanding CME and a neighbouring active region. A pre-existing quasi-separatrix layer was activated in the process. This scenario is supported by data-constrained numerical simulations of the eruption. Observations show that dense cool filament plasma was redirected and heated in situ, producing coronal-temperature emission around the reconnection region.
These results provide the first direct observational evidence, supported by MHD simulations and magnetic modelling, that a large-scale reconfiguration of the coronal magnetic field takes place during solar eruptions via the process of magnetic reconnection.}, language = {en} } @article{KralemannPikovskijRosenblum2014, author = {Kralemann, Bjoern and Pikovskij, Arkadij and Rosenblum, Michael}, title = {Reconstructing effective phase connectivity of oscillator networks from observations}, series = {New journal of physics : the open-access journal for physics}, volume = {16}, journal = {New journal of physics : the open-access journal for physics}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {1367-2630}, doi = {10.1088/1367-2630/16/8/085013}, pages = {21}, year = {2014}, abstract = {We present a novel approach for the recovery of the directional connectivity of a small oscillator network by means of phase dynamics reconstruction from multivariate time series data. The main idea is to use a triplet analysis instead of the traditional pairwise one. Our technique reveals an effective phase connectivity which is generally not equivalent to a structural one. We demonstrate that, by comparing the coupling functions from all possible triplets of oscillators, we are able to achieve a good separation in the reconstruction between existing and non-existing connections, and thus reliably reproduce the network structure.}, language = {en} } @phdthesis{Goswami2014, author = {Goswami, Bedartha}, title = {Uncertainties in climate data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Scientific inquiry requires that we formulate not only what we know, but also what we do not know and by how much. In climate data analysis, this involves an accurate specification of measured quantities and a consequent analysis that consciously propagates the measurement errors at each step. The dissertation presents a thorough analytical method to quantify errors of measurement inherent in paleoclimate data. An additional focus is on the uncertainties in assessing the coupling between different factors that influence the global mean temperature (GMT). Paleoclimate studies critically rely on `proxy variables' that record climatic signals in natural archives. However, such proxy records inherently involve uncertainties in determining the age of the signal. We present a generic Bayesian approach to analytically determine the proxy record along with its associated uncertainty, resulting in a time-ordered sequence of correlated probability distributions rather than a precise time series. We further develop a recurrence-based method to detect dynamical events from the proxy probability distributions. The methods are validated with synthetic examples and demonstrated with real-world proxy records. The proxy estimation step reveals the interrelations between proxy variability and uncertainty. The recurrence analysis of the East Asian Summer Monsoon during the last 9000 years confirms the well-known `dry' events at 8200 and 4400 BP, plus an additional significantly dry event at 6900 BP. We also analyze the network of dependencies surrounding GMT. We find an intricate, directed network with multiple links between the different factors at multiple time delays. We further uncover a significant feedback from the GMT to the El Ni{\~n}o Southern Oscillation at quasi-biennial timescales.
The analysis highlights the need for a more nuanced formulation of influences between different climatic factors, as well as the limitations in trying to estimate such dependencies.}, language = {en} } @phdthesis{Mayer2014, author = {Mayer, Michael}, title = {Pulsar wind nebulae at high energies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71504}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2014}, abstract = {Pulsar wind nebulae (PWNe) are the most abundant TeV gamma-ray emitters in the Milky Way. The radiative emission of these objects is powered by fast-rotating pulsars, which feed part of their rotational energy into winds of relativistic particles. This thesis presents an in-depth study of the detected population of PWNe at high energies. To outline general trends regarding their evolutionary behaviour, a time-dependent model is introduced and compared to the available data. In particular, this work presents two exceptional PWNe which stand out from the rest of the population, namely the Crab Nebula and N 157B. Both objects are driven by pulsars with extremely high rotational energy loss rates. Accordingly, they are often referred to as energetic twins. Modelling the non-thermal multi-wavelength emission of N 157B gives access to specific properties of this object, like the magnetic field inside the nebula. Comparing the derived parameters to those of the Crab Nebula reveals large intrinsic differences between the two PWNe. Possible origins of these differences are discussed in the context of the closely similar pulsars. Compared to the TeV gamma-ray regime, the number of detected PWNe is much smaller in the MeV-GeV gamma-ray range. In the latter range, the Crab Nebula stands out due to the recent detection of gamma-ray flares. In general, the measured flux enhancements on short time scales of days to weeks were not expected in the theoretical understanding of PWNe. In this thesis, the variability of the Crab Nebula is analysed using data from the Fermi Large Area Telescope (Fermi-LAT). For the presented analysis, a new gamma-ray reconstruction method is used, providing a higher sensitivity and a lower energy threshold compared to previous analyses. The derived gamma-ray light curve of the Crab Nebula is investigated for flares and periodicity. The detected flares are analysed regarding their energy spectra, and their variety and commonalities are discussed. In addition, a dedicated analysis of the flare which occurred in March 2013 is performed. The derived short-term variability time scale is roughly 6 h, implying that a small region inside the Crab Nebula is responsible for the enigmatic flares. The most promising theories explaining the origins of the flux eruptions and gamma-ray variability are discussed in detail. In the technical part of this work, a new analysis framework is presented. The introduced software, called gammalib/ctools, is currently being developed for the future CTA observatory. The analysis framework is extensively tested using data from the H.E.S.S. experiment. To conduct proper data analysis in the likelihood framework of gammalib/ctools, a model describing the distribution of background events in H.E.S.S. data is presented. The software provides the infrastructure to combine data from several instruments in one analysis. To study the gamma-ray emitting PWN population, data from Fermi-LAT and H.E.S.S. are combined in the likelihood framework of gammalib/ctools.
In particular, the spectral peak, which usually lies in the overlap energy regime between these two instruments, is determined with the presented analysis framework. The derived measurements are compared to the predictions from the time-dependent model. The combined analysis supports the conclusion of a diverse population of gamma-ray emitting PWNe.}, language = {en} } @phdthesis{Balzer2014, author = {Balzer, Arnim}, title = {Crab flare observations with H.E.S.S. phase II}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72545}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The H.E.S.S. array is a third-generation Imaging Atmospheric Cherenkov Telescope (IACT) array. It is located in the Khomas Highland in Namibia and measures very high energy (VHE) gamma-rays. In Phase I, the array started data taking in 2004 with its four identical 13 m telescopes. Since then, H.E.S.S. has emerged as the most successful IACT experiment to date. Among the almost 150 sources of VHE gamma-ray radiation found so far, even the oldest detection, the Crab Nebula, keeps surprising the scientific community with unexplained phenomena such as the recently discovered very energetic flares of high-energy gamma-ray radiation. During its most recent flare, which was detected by the Fermi satellite in March 2013, the Crab Nebula was simultaneously observed with the H.E.S.S. array for six nights. The results of the observations will be discussed in detail in the course of this work. During the nights of the flare, the new 24 m × 32 m H.E.S.S. II telescope was still being commissioned, but participated in the data taking for one night. To be able to reconstruct and analyze the data of the H.E.S.S. Phase II array, the algorithms and software used by the H.E.S.S. Phase I array had to be adapted. The most prominent advanced shower reconstruction technique, developed by de Naurois and Rolland, the template-based model analysis, compares real shower images taken by the Cherenkov telescope cameras with shower templates obtained using a semi-analytical model. To find the best-fitting image and, therefore, the relevant parameters that best describe the air shower, a pixel-wise log-likelihood fit is performed. The adaptation of this advanced shower reconstruction technique to the heterogeneous H.E.S.S. Phase II array for stereo events (i.e., air showers seen by at least two telescopes of any kind), its performance using Monte Carlo simulations, as well as its application to real data will be described.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays.
The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results for this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Thomas2013, author = {Thomas, Bj{\"o}rn Daniel}, title = {Analysis and management of low flows in small catchments of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69247}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Water management and environmental protection are vulnerable to extreme low flows during streamflow droughts. During the last decades, summer runoff and low flows have decreased in most rivers of Central Europe. Discharge projections agree that a future decrease in runoff is likely for catchments in Brandenburg, Germany. Depending on the first-order controls on low flows, different adaptation measures are expected to be appropriate. Small catchments were analyzed because they are expected to be more vulnerable to a changing climate than larger rivers. They are mainly headwater catchments with smaller groundwater storage. Local characteristics are more important at this scale and can increase vulnerability. This thesis jointly evaluates potential adaptation measures to sustain minimum runoff in small catchments of Brandenburg, Germany, and similarities of these catchments regarding low flows. The following guiding questions are addressed: (i) Which first-order controls on low flows and related time scales exist? (ii) Which are the differences between small catchments regarding low flow vulnerability? (iii) Which adaptation measures to sustain minimum runoff in small catchments of Brandenburg are appropriate considering regional low flow patterns? Potential adaptation measures to sustain minimum runoff during periods of low flows can be classified into three categories: (i) increase of groundwater recharge and subsequent baseflow by land use change, land management, and artificial groundwater recharge; (ii) increase of water storage with regulated outflow by reservoirs, lakes, and wetland water management; and (iii) regional low flow patterns have to be considered during planning of measures with multiple purposes (urban water management, waste water recycling, and inter-basin water transfer). The question remained whether water management of areas with shallow groundwater tables can efficiently sustain minimum runoff. As an example, water management scenarios of a ditch-irrigated area were evaluated using the model Hydrus-2D. Increasing antecedent water levels and stopping ditch irrigation during periods of low flows increased fluxes from the pasture to the stream, but storage was depleted faster during the summer months due to higher evapotranspiration. Fluxes from this approx. 1 km long pasture with an area of approx. 13 ha ranged from 0.3 to 0.7 l/s, depending on the scenario.
This demonstrates that numerous such small decentralized measures are necessary to sustain minimum runoff in meso-scale catchments. Differences in the low flow risk of catchments and meteorological low flow predictors were analyzed. A principal component analysis was applied to daily discharge of 37 catchments between 1991 and 2006. Flows decreased more in southeast Brandenburg, in accordance with meteorological forcing. Low flow risk was highest in a region east of Berlin because of the intersection of a more continental climate and the specific geohydrology. In these catchments, flows decreased faster during summer and the low flow period was prolonged. A non-linear support vector machine regression was applied to iteratively select meteorological predictors for annual 30-day minimum runoff in 16 catchments between 1965 and 2006. The potential evapotranspiration sum of the previous 48 months was the most important predictor (r²=0.28). The potential evapotranspiration of the previous 3 months and the precipitation of the previous 3 months and of the previous year increased model performance (r²=0.49, including all four predictors). Model performance was higher for catchments with low yield and more damped runoff. In catchments with high low flow risk, the explanatory power of long-term potential evapotranspiration was high. Catchments with a high low flow risk as well as catchments with a considerable decrease in flows in southeast Brandenburg have the highest demand for adaptation. Measures increasing groundwater recharge are to be preferred. Catchments with high low flow risk showed relatively deep and decreasing groundwater heads, allowing increased groundwater recharge in recharge areas at higher altitudes away from the streams. Low flows are expected to stay low or decrease even further, because long-term potential evapotranspiration was the most important low flow predictor and is projected to increase during climate change. Differences in low flow risk and runoff dynamics between catchments have to be considered in the management and planning of measures whose purpose is not only to sustain minimum runoff.}, language = {en} } @phdthesis{Ohme2012, author = {Ohme, Frank}, title = {Bridging the gap between post-Newtonian theory and numerical relativity in gravitational-wave data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60346}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One of the most exciting predictions of Einstein's theory of gravitation that has not yet been proven experimentally by a direct detection is gravitational waves. These are tiny distortions of the spacetime itself, and a world-wide effort to directly measure them for the first time with a network of large-scale laser interferometers is currently ongoing and expected to provide positive results within this decade. One potential source of measurable gravitational waves is the inspiral and merger of two compact objects, such as binary black holes. Successfully finding their signature in the noise-dominated data of the detectors crucially relies on accurate predictions of what we are looking for. In this thesis, we present a detailed study of how the most complete waveform templates can be constructed by combining the results from (A) analytical expansions within the post-Newtonian framework and (B) numerical simulations of the full relativistic dynamics.
We analyze various strategies to construct complete hybrid waveforms that consist of a post-Newtonian inspiral part matched to numerical-relativity data. We elaborate on existing approaches for nonspinning systems by extending the accessible parameter space and introducing an alternative scheme formulated in the Fourier domain. Our methods can now be readily applied to multiple spherical-harmonic modes and precessing systems. In addition, we analyze in detail the accuracy of hybrid waveforms with the goal of quantifying how the numerous sources of error in the approximation techniques affect the application of such templates in real gravitational-wave searches. This is of major importance for the future construction of improved models, but also for the correct interpretation of gravitational-wave observations that are made utilizing any complete waveform family. In particular, we comprehensively discuss how long the numerical-relativity contribution to the signal has to be in order to make the resulting hybrids accurate enough, and for currently feasible simulation lengths we assess the physics one can potentially do with template-based searches.}, language = {en} } @phdthesis{Bergner2011, author = {Bergner, Andr{\´e}}, title = {Synchronization in complex systems with multiple time scales}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53407}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present work, synchronization phenomena in complex dynamical systems exhibiting multiple time scales have been analyzed. Multiple time scales can be active in different ways. Three different systems have been analyzed with different methods from data analysis. The first system studied is a large heterogeneous network of bursting neurons, that is, a system with two predominant time scales: the fast firing of action potentials (spikes) and the bursts of repetitive spikes followed by quiescent phases. This system has been integrated numerically and analyzed with methods based on recurrence in phase space. An interesting result is that the two distinct time scales exhibit different transitions to synchrony. Moreover, an anomalous synchronization effect can be observed in the fast time scale, i.e., there is a range of the coupling strength where desynchronization occurs. The second system analyzed, numerically as well as experimentally, is a pair of coupled CO₂ lasers in a chaotic bursting regime. This system is interesting due to its similarity with epidemic models. We explain the bursts by different time scales generated from unstable periodic orbits embedded in the chaotic attractor and perform a synchronization analysis of these different orbits utilizing the continuous wavelet transform. We find diverse routes to synchrony for these different observed time scales. The last system studied is a small network motif of limit-cycle oscillators. Specifically, we have studied a hub motif, which serves as an elementary building block for scale-free networks, a type of network found in many real-world applications. These hubs are of special importance for communication and information transfer in complex networks. Here, a detailed study on the mechanism of synchronization in oscillatory networks with a broad frequency distribution has been carried out. In particular, we find a remote synchronization of nodes in the network which are not directly coupled. We also explain the responsible mechanism and its limitations and constraints.
Further, we derive an analytic expression for it and show that information transmission in pure phase oscillators, such as the Kuramoto type, is limited. In addition to the numerical and analytic analysis, an experiment consisting of electrical circuits has been designed. The obtained results confirm the former findings.}, language = {en} } @misc{Dietrich2008, type = {Master Thesis}, author = {Dietrich, Jan Philipp}, title = {Phase Space Reconstruction using the frequency domain : a generalization of actual methods}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50738}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Phase space reconstruction is a method that allows one to reconstruct the phase space of a system using only a one-dimensional time series as input. It can be used to calculate Lyapunov exponents and to detect chaos, it helps to understand complex dynamics and their behavior, and it can reproduce datasets which were not measured. There are many different methods which produce correct reconstructions, such as time delay, Hilbert transformation, derivation, and integration. The most widely used is time delay, but all methods have special properties which are useful in different situations; hence, every reconstruction method has situations where it is the best choice. Looking at all these different methods, the questions are: Why can all these different-looking methods be used for the same purpose? Is there any connection between all these functions? The answer is found in the frequency domain: after a Fourier transformation, all these methods take on a similar shape. Every presented reconstruction method can be described as a multiplication in the frequency domain with a frequency-dependent reconstruction function. This structure is also known as a filter. From this point of view, every reconstructed dimension can be seen as a filtered version of the measured time series: it contains the original data but applies a new focus, amplifying some parts and reducing others. Furthermore, I show that not every function can be used for reconstruction. In the thesis, three characteristics are identified which are mandatory for the reconstruction function. Under these restrictions, one obtains a whole family of new reconstruction functions. It is thus possible to reduce noise within the reconstruction process itself, or to use the advantages of already known reconstruction methods while suppressing their unwanted characteristics.}, language = {en} } @misc{Donges2009, type = {Master Thesis}, author = {Donges, Jonathan Friedemann}, title = {Complex networks in the climate system}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49775}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Complex network theory provides an elegant and powerful framework to statistically investigate the topology of local and long-range dynamical interrelationships, i.e., teleconnections, in the climate system. Employing a refined methodology relying on linear and nonlinear measures of time series analysis, the intricate correlation structure within a multivariate climatological data set is cast into network form. Within this graph-theoretical framework, vertices are identified with grid points taken from the data set, each representing a region on the Earth's surface, and edges correspond to strong statistical interrelationships between the dynamics on pairs of grid points.
The resulting climate networks are neither perfectly regular nor completely random, but display the intriguing and nontrivial characteristics of complexity commonly found in real-world networks such as the internet, citation and acquaintance networks, food webs, and cortical networks in the mammalian brain. Among other interesting properties, climate networks exhibit the "small-world" effect and possess a broad degree distribution with dominating super-nodes as well as a pronounced community structure. We have performed an extensive and detailed graph-theoretical analysis of climate networks on the global topological scale, focussing on the flow and centrality measure betweenness, which is locally defined at each vertex but includes global topological information by relying on the distribution of shortest paths between all pairs of vertices in the network. The betweenness centrality field reveals a rich internal structure in complex climate networks constructed from reanalysis and atmosphere-ocean coupled general circulation model (AOGCM) surface air temperature data. Our novel approach uncovers an elaborately woven meta-network of highly localized channels of strong dynamical information flow that we relate to global surface ocean currents and dub the backbone of the climate network, in analogy to the homonymous data highways of the internet. This finding points to a major role of the oceanic surface circulation in coupling and stabilizing the global temperature field in the long-term mean (140 years for the model run and 60 years for reanalysis data). Carefully comparing the backbone structures detected in climate networks constructed using linear Pearson correlation and nonlinear mutual information, we argue that the high sensitivity of betweenness with respect to small changes in network structure may allow one to detect the footprints of strongly nonlinear physical interactions in the climate system. The results presented in this thesis are thoroughly founded and substantiated using a hierarchy of statistical significance tests on the level of time series and networks, i.e., by tests based on time series surrogates as well as network surrogates. This is particularly relevant when working with real-world data. Specifically, we developed new types of network surrogates to include the additional constraints imposed by the spatial embedding of vertices in a climate network. Our methodology is of potential interest for a broad audience within the physics community and various applied fields, because it is universal in the sense of being valid for any spatially extended dynamical system. It can help to understand the localized flow of dynamical information in any such system by combining multivariate time series analysis, a complex network approach, and the information flow measure betweenness centrality. Possible fields of application include fluid dynamics (turbulence), plasma physics, and biological physics (population models, neural networks, cell models). Furthermore, the climate network approach is equally relevant for experimental data as well as model simulations and hence introduces a novel perspective on model evaluation and data-driven model building. Our work is timely in the context of the current debate on climate change within the scientific community, since it allows one to assess, from a new perspective, the regional vulnerability and stability of the climate system while relying on global and not only on regional knowledge.
The methodology developed in this thesis hence has the potential to substantially contribute to the understanding of the local effect of extreme events and tipping points in the Earth system within a holistic global framework.}, language = {en} } @phdthesis{GholamiGhadikolaei2007, author = {Gholami Ghadikolaei, Iraj}, title = {Data analysis of continuous gravitational waves}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-18800}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This thesis describes two main projects. The first is the optimization of a hierarchical search strategy to search for unknown pulsars. This project is divided into two parts: the first (and main) part is the semi-coherent hierarchical optimization strategy; the second is a coherent hierarchical optimization strategy which can be used in a project like Einstein@Home. In both strategies we have found that the 3-stage search is the optimal strategy to search for unknown pulsars. For the second project we have developed computer software for a coherent Multi-IFO (Interferometer Observatory) search. To validate our software, we have worked on simulated data as well as hardware-injected signals of pulsars in the fourth LIGO science run (S4). While with the current sensitivity of our detectors we do not expect to detect any true gravitational-wave signals in our data, we can still set upper limits on the strength of such signals. These upper limits tell us, in effect, how weak a signal we could still have detected. We have also used our software to set upper limits on the signal strength of known isolated pulsars using LIGO fifth science run (S5) data.}, language = {en} }